diff --git a/packs/appdynamics-collectors-1.22.1287/README.md b/packs/appdynamics-collectors-1.22.1287/README.md new file mode 100644 index 00000000..f3019e92 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/README.md @@ -0,0 +1,26 @@ +## The Appdynamics Collector Cloud Helm Charts +An Add-on pack for Spectro Cloud to use the monitoring of k8s cluster with AppDynamics collectors. + +## Appdynamics Cloud Helm Charts +This repository maintains helm charts for installing Appdynamics Cloud Collector. + +## Parameters +| Parameter | Description | +|-----------|-------------| +| clusterName | String to specify the name of the k8s cluster | +| endpoint | The endpoint Tenant to which you want to send the data to. Please refer the product guide link from References for more details | +| clientId | clientId of your Tenant . Please refer the product guide link from References for more details | +| clientSecret | clientSecret of your Tenant. Please refer the product guide link from References for more details | +| tokenUrl | tokenUrl of your Tenant. Please refer the product guide link from References for more details | +| tenantId | tenantId of your Tenant. Please refer the product guide link from References for more details | + + +## References +To enable log collection for your cluster, please refer: +https://docs.appdynamics.com/fso/cloud-native-app-obs/en/kubernetes-and-app-service-monitoring/log-collection/onboard-logs-from-kubernetes/configure-the-log-collector + +Here is the guide to auto-instrument your application. +https://docs.appdynamics.com/fso/cloud-native-app-obs/en/kubernetes-and-app-service-monitoring/application-performance-monitoring-with-opentelemetry/configure-services-running-inside-a-supported-kubernetes-cluster/auto-instrument-your-services-using-opentelemetry-operator-for-kubernetes + +Here is the complete product guide about the AppDynamics collectors. 
+https://docs.appdynamics.com/fso/cloud-native-app-obs/en/kubernetes-and-app-service-monitoring/install-kubernetes-and-app-service-monitoring diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors-1.22.1287.tgz b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors-1.22.1287.tgz new file mode 100644 index 00000000..ffd9f6fd Binary files /dev/null and b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors-1.22.1287.tgz differ diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/.helmignore new file mode 100644 index 00000000..3c659371 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +eks-helper/ + +LICENSES/ \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/Chart.lock b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/Chart.lock new file mode 100644 index 00000000..0af24d2d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/Chart.lock @@ -0,0 +1,27 @@ +dependencies: +- name: appdynamics-cloud-k8s-monitoring + repository: "" + version: 1.22.0 +- name: appdynamics-cloud-db-collector + repository: "" + version: 1.9.0 +- name: appdynamics-network-monitoring + repository: "" + version: 0.2.0 +- name: appdynamics-csaas-k8s-cluster-agent + repository: "" + version: 1.19.0 +- name: appdynamics-otel-collector + repository: 
https://artifactory.bare.appdynamics.com/artifactory/appd-helm + version: 24.7.0-1646 +- name: appdynamics-security-collector + repository: https://artifactory.bare.appdynamics.com/artifactory/appd-helm + version: 1.0.26 +- name: appdynamics-otel-instrumentation + repository: https://artifactory.bare.appdynamics.com/artifactory/appd-helm + version: 24.4.0-1589 +- name: appdynamics-auto-instrumentation-agent + repository: "" + version: 1.19.0 +digest: sha256:4994a2bff71c0f7cd9ad85ce8d073457ec5aba566a25b4fa585c3f01e4d7ca82 +generated: "2024-07-29T04:41:38.754051677Z" diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/Chart.yaml new file mode 100644 index 00000000..f12cca39 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/Chart.yaml @@ -0,0 +1,50 @@ +apiVersion: v2 +appVersion: 1.22.1287 +dependencies: +- name: appdynamics-cloud-k8s-monitoring + repository: "" + version: 1.22.0 +- name: appdynamics-cloud-db-collector + repository: "" + version: 1.9.0 +- condition: appdynamics-network-monitoring.enabled + name: appdynamics-network-monitoring + repository: "" + version: 0.2.0 +- name: appdynamics-csaas-k8s-cluster-agent + repository: "" + version: 1.19.0 +- condition: appdynamics-otel-collector.enabled + name: appdynamics-otel-collector + repository: https://artifactory.bare.appdynamics.com/artifactory/appd-helm + version: 24.7.0-1646 +- condition: appdynamics-security-collector.enabled + name: appdynamics-security-collector + repository: https://artifactory.bare.appdynamics.com/artifactory/appd-helm + version: 1.0.26 +- condition: appdynamics-otel-instrumentation.enabled + name: appdynamics-otel-instrumentation + repository: https://artifactory.bare.appdynamics.com/artifactory/appd-helm + version: 24.4.0-1589 +- condition: appdynamics-auto-instrumentation-agent.enabled + name: appdynamics-auto-instrumentation-agent + 
repository: "" + version: 1.19.0 +description: Helm Charts for installing Appdynamics Collectors +home: https://appdynamics.com +icon: https://raw.githubusercontent.com/CiscoDevNet/appdynamics-charts/master/logo.png +keywords: +- appdynamics +- cloud +- collector +- cluster +- kubernetes +- monitoring +- pod +- deployment +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-collectors +type: application +version: 1.22.1287 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/LICENSE b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/LICENSE new file mode 100644 index 00000000..d17050b1 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/LICENSE @@ -0,0 +1,11 @@ +APPDYNAMICS +END USER LICENSE AGREEMENT + +By accessing the Software herein, you (and the organization you represent) ("You") acknowledge and agree that the use +of the Software and open source software are governed by (1) the General Terms found at +https://www.cisco.com/c/dam/en_us/about/doing_business/legal/Cisco_General_Terms.pdf and the applicable Product +Specific Terms found at https://www.cisco.com/c/en/us/about/legal/cloud-and-software/software-terms.html or (2) any +other superseding agreement between AppDynamics, or its parent company Cisco Systems, Inc., as applicable, and You. +References to End User in any superseding agreement shall mean You. 
+ +AppDynamics Proprietary and Confidential * Revision 2024.03 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/Chart.yaml new file mode 100644 index 00000000..72ed172a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +appVersion: 24.4.0 +description: Sophisticated helm chart to deploy appdynamics auto instrumentation agent +home: https://appdynamics.com +icon: https://raw.githubusercontent.com/CiscoDevNet/appdynamics-charts/master/logo.png +keywords: +- appdynamics +- cluster +- kubernetes +- openshift +- monitoring +- pod +- deployment +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-auto-instrumentation-agent +version: 1.19.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/README.md b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/README.md new file mode 100644 index 00000000..38c0d04b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/README.md @@ -0,0 +1,32 @@ +# Appdynamics Helm Chart + +### Add AppDynamics helm repo + +### Create values yaml to override default ones +```yaml +imageInfo: + agentImage: docker.io/appdynamics/auto-instrumentation-agent + agentTag: 24.4.0 + imagePullPolicy: Always + imagePullSecret: null + +controllerInfo: + url: + account: + username: + password: + accessKey: + +agentServiceAccount: appdynamics-auto-instrumentation-agent + +instrumentationConfig: + enabled: true + containerAppCorrelationMethod: proxy +``` +### Install 
auto-instrumentation-agent using helm chart +```bash +helm install auto-instrumentation-agent appdynamics-charts/auto-instrumentation-agent -f .yaml --namespace appdynamics +``` + +### Note: +auto instrumentation agent installation is independent of otel collector. \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/_helper.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/_helper.tpl new file mode 100644 index 00000000..ea40b4ed --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/_helper.tpl @@ -0,0 +1,3 @@ +{{- define "appdynamics-auto-instrumentation-agent.sensitiveDataController" -}} +{{ (get . "data") | trim | b64enc | required (get . "message") }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/auto-instrumentation-agent.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/auto-instrumentation-agent.yaml new file mode 100644 index 00000000..0022a3a2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/auto-instrumentation-agent.yaml @@ -0,0 +1,174 @@ +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: AutoInstrumentationAgent +metadata: + name: {{ (cat (regexReplaceAll "([^a-zA-Z0-9-.]+)" (lower (.Values.autoInstrumentationAgent.agentName | default (cat .Release.Name "-" "ai-agent" | nospace))) "-") "-" .Release.Namespace | nospace) | trunc 63 }} + namespace: {{ .Release.Namespace }} +spec: + agentName: {{ cat .Release.Name "-" "ai-agent" "-" .Release.Namespace }} + controllerUrl: {{ required "AppDynamics 
controller URL is required!" .Values.controllerInfo.url }} + account: {{ required "AppDynamics controller account is required!" .Values.controllerInfo.account }} + image: {{ .Values.imageInfo.agentImage }}:{{ .Values.imageInfo.agentTag }} + serviceAccountName: {{ .Values.agentServiceAccount }} + + {{ if .Values.controllerInfo.customSSLCert -}} + # Custom SSL config + customSSLSecret: auto-instrumentation-agent-ssl-cert + {{- end }} + + {{ with .Values.controllerInfo -}} + {{ if .proxyUrl -}} + # Proxy config + proxyUrl: {{ .proxyUrl }} + {{ if .authenticateProxy -}} + proxyUser: {{ required "Proxy user is required to authenticate proxy!" .proxyUser }} + {{- end }} + {{- end }} + {{- end }} + + # Log specific properties + {{ with .Values.logProperties -}} + logFileSizeMb: {{ .logFileSizeMb }} + logFileBackups: {{ .logFileBackups }} + logLevel: {{ .logLevel }} + {{- end }} + + + {{ if .Values.imageInfo.imagePullPolicy -}} + # Image pull policy + imagePullPolicy: {{ .Values.imageInfo.imagePullPolicy }} + {{- end }} + + {{ if .Values.imageInfo.imagePullSecret -}} + # Image pull secret + imagePullSecret: {{ .Values.imageInfo.imagePullSecret }} + {{- end }} + + #labels + labels: + {{- toYaml .Values.autoInstrumentationAgent.labels | nindent 4 }} + + # Node selector + nodeSelector: + {{- toYaml .Values.autoInstrumentationAgent.nodeSelector | nindent 4 }} + + # Tolerations + tolerations: + {{- toYaml .Values.autoInstrumentationAgent.tolerations | nindent 4 }} + + # Resources + resources: + {{- toYaml .Values.autoInstrumentationAgent.resources | nindent 4 }} + + {{ with .Values.instrumentationConfig -}} + # Instrumentation config + {{ if .defaultAppName -}} + defaultAppName: {{ .defaultAppName }} + {{- end }} + {{ if .defaultEnv -}} + defaultEnv: {{ .defaultEnv }} + {{- end }} + {{ if .defaultInstrumentationLabelMatch -}} + defaultInstrumentationLabelMatch: + {{- toYaml .defaultInstrumentationLabelMatch | nindent 4 }} + {{- end }} + {{ if .defaultInstrumentMatchString -}} + 
defaultInstrumentMatchString: {{ .defaultInstrumentMatchString }} + {{- end }} + {{ if .defaultCustomConfig -}} + defaultCustomConfig: {{ .defaultCustomConfig }} + {{- end }} + {{ if .appNameStrategy -}} + appNameStrategy: {{ .appNameStrategy }} + {{- end }} + {{ if .tierNameStrategy -}} + tierNameStrategy: {{ .tierNameStrategy }} + {{- end }} + {{ if .appNameLabel -}} + appNameLabel: {{ .appNameLabel }} + {{- end }} + {{ if .tierNameLabel -}} + tierNameLabel: {{ .tierNameLabel }} + {{- end }} + {{ if .nodeName -}} + nodeName: {{ .nodeName }} + {{- end }} + {{ if .imageInfo -}} + imageInfo: + {{- toYaml .imageInfo | nindent 4}} + {{- end }} + {{ if .instrumentationMethod -}} + instrumentationMethod: {{ .instrumentationMethod }} + {{- end }} + {{ if .resourcesToInstrument -}} + resourcesToInstrument: + {{- toYaml .resourcesToInstrument | nindent 4 }} + {{- end }} + {{ if .instrumentationRules -}} + instrumentationRules: + {{- toYaml .instrumentationRules | nindent 4 }} + {{- end }} + {{ if .nsToInstrumentRegex -}} + nsToInstrumentRegex: {{ .nsToInstrumentRegex }} + {{- end }} + {{ if .netvizInfo -}} + netvizInfo: + {{- toYaml .netvizInfo | nindent 4 }} + {{- end }} + {{ if .runAsUser -}} + runAsUser: {{ .runAsUser }} + {{- end }} + {{ if .runAsGroup -}} + runAsGroup: {{ .runAsGroup }} + {{- end }} + {{ if .runAsNonRoot -}} + runAsNonRoot: {{ .runAsNonRoot }} + {{- end }} + {{ if .readOnlyRootFilesystem -}} + readOnlyRootFilesystem: {{ .readOnlyRootFilesystem }} + {{- end }} + {{ if .allowPrivilegeEscalation -}} + allowPrivilegeEscalation: {{ .allowPrivilegeEscalation }} + {{- end }} + {{ if .capabilities -}} + capabilities: {{ .capabilities }} + {{- end }} + {{ if .seccompProfile -}} + seccompProfile: {{ .seccompProfile }} + {{- end }} + {{ if .windowsOptions -}} + windowsOptions: {{ .windowsOptions }} + {{- end }} + {{ if .seLinuxOptions -}} + seLinuxOptions: {{ .seLinuxOptions }} + {{- end }} + {{ if .procMount -}} + procMount: {{ .procMount }} + {{- end }} + {{ 
if .privileged -}} + privileged: {{ .privileged }} + {{- end }} + {{ if .numberOfTaskWorkers -}} + numberOfTaskWorkers: {{ .numberOfTaskWorkers }} + {{- end }} + {{ if .defaultAnalyticsHost -}} + defaultAnalyticsHost: {{ .defaultAnalyticsHost }} + {{- end }} + {{ if .defaultAnalyticsPort -}} + defaultAnalyticsPort: {{ .defaultAnalyticsPort }} + {{- end }} + {{ if .defaultAnalyticsSslEnabled -}} + defaultAnalyticsSslEnabled: {{ .defaultAnalyticsSslEnabled }} + {{- end }} + {{ if .enableInstallationReport -}} + enableInstallationReport: {{ .enableInstallationReport }} + {{ end -}} + {{ if .enableForceReInstrumentation -}} + enableForceReInstrumentation: {{ .enableForceReInstrumentation }} + {{ end -}} + {{if .containerAppCorrelationMethod -}} + containerAppCorrelationMethod: {{ .containerAppCorrelationMethod }} + {{ end -}} + {{if .metadataServerPort -}} + metadataServerPort: {{ .metadataServerPort }} + {{ end -}} + {{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/cr-agent-instrumentation.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/cr-agent-instrumentation.yaml new file mode 100644 index 00000000..5391eabb --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/cr-agent-instrumentation.yaml @@ -0,0 +1,63 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: appdynamics-auto-instrumentation-agent-{{.Release.Name}} +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + - secrets + - configmaps + verbs: + - create + - update + - delete +- apiGroups: + - apps + resources: + - daemonsets + - statefulsets + - deployments + - replicasets + verbs: + - update +- apiGroups: + - "" + resources: + - namespaces + verbs: + - 
get + - update + - list {{ if .Capabilities.APIVersions.Has "apps.openshift.io/v1/DeploymentConfig" -}} +- apiGroups: + - apps.openshift.io + resources: + - deploymentconfigs + verbs: + - update +{{- end }} +{{ with .Values.instrumentationConfig.containerAppCorrelationMethod }} +{{ if eq . "kubeapi" }} +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - create + - delete +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - get + - create + - update + - delete +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/crb-agent-instrumentation.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/crb-agent-instrumentation.yaml new file mode 100644 index 00000000..a2468f7d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/crb-agent-instrumentation.yaml @@ -0,0 +1,14 @@ +{{ $agentSA := .Values.agentServiceAccount }} +{{ $namespace := .Release.Namespace }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: appdynamics-auto-instrumentation-agent-{{.Release.Name}} +subjects: +- kind: ServiceAccount + name: {{ $agentSA }} + namespace: {{ $namespace }} +roleRef: + kind: ClusterRole + name: appdynamics-auto-instrumentation-agent-{{.Release.Name}} + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/sa-agent.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/sa-agent.yaml new file mode 100644 index 00000000..64f85107 --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/rbac/sa-agent.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.agentServiceAccount }} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-auto-instrumentation-agent.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-auto-instrumentation-agent.yaml new file mode 100644 index 00000000..38531ca6 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-auto-instrumentation-agent.yaml @@ -0,0 +1,23 @@ +{{ $secret := (lookup "v1" "Secret" .Release.Namespace "auto-instrumentation-agent-secret") }} +{{ $annotations := dict "temp" "temp" }} +{{ if $secret }} + {{ $annotations = $secret.metadata.annotations }} +{{ end }} +{{ if or (not ($secret)) (get $annotations "appdynamics.helm.charts/created-by") }} +{{ $namespace := .Release.Namespace }} +apiVersion: v1 +kind: Secret +metadata: + name: auto-instrumentation-agent-secret + namespace: {{ $namespace }} + annotations: + appdynamics.helm.charts/created-by: auto-instrumentation-agent-helm-chart +type: Opaque +data: + {{ with .Values.controllerInfo -}} + controller-key: {{ include "appdynamics-auto-instrumentation-agent.sensitiveDataController" (dict "data" .accessKey "message" "AppDynamics controller access key is required!") }} + {{- end -}} + {{ with .Values.controllerInfo }} + api-user: {{ cat (.username | trim | required "AppDynamics controller username is required!") "@" (.account | trim | required "AppDynamics controller account is required!") ":" (.password | trim | required "Appdynamics 
controller password is required!") | nospace | b64enc -}} + {{- end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-custom-ssl.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-custom-ssl.yaml new file mode 100644 index 00000000..a02204b2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-custom-ssl.yaml @@ -0,0 +1,22 @@ +{{ $secret := (lookup "v1" "Secret" .Release.Namespace "auto-instrumentation-agent-ssl-cert") }} +{{ $annotations := dict "temp" "temp" }} +{{ if $secret }} + {{ $annotations = $secret.metadata.annotations }} +{{ end }} +{{ if or (not ($secret)) (get $annotations "appdynamics.helm.charts/created-by") }} +{{ $namespace := .Release.Namespace }} +{{ with .Values -}} +{{ if .controllerInfo.customSSLCert -}} +apiVersion: v1 +kind: Secret +metadata: + name: auto-instrumentation-agent-ssl-cert + namespace: {{ $namespace }} + annotations: + appdynamics.helm.charts/created-by: auto-instrumentation-agent-helm-chart +type: Opaque +data: + "custom-ssl.pem": {{ .controllerInfo.customSSLCert }} +{{ end -}} +{{ end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-proxy-secret.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-proxy-secret.yaml new file mode 100644 index 00000000..5d1e196e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/templates/secrets/secret-proxy-secret.yaml @@ -0,0 +1,25 @@ 
+{{ $secret := (lookup "v1" "Secret" .Release.Namespace "auto-instrumentation-proxy-secret") }} +{{ $annotations := dict "temp" "temp" }} +{{ if $secret }} + {{ $annotations = $secret.metadata.annotations }} +{{ end }} +{{ if or (not ($secret)) (get $annotations "appdynamics.helm.charts/created-by") }} +{{ $namespace := .Release.Namespace }} +{{ with .Values -}} +{{ if .controllerInfo.authenticateProxy -}} +{{ if not .controllerInfo.proxyUrl -}} +{{ fail "Proxy url is required to authenticate proxy!" -}} +{{ end -}} +apiVersion: v1 +kind: Secret +metadata: + name: auto-instrumentation-proxy-secret + namespace: {{ $namespace }} + annotations: + appdynamics.helm.charts/created-by: auto-instrumentation-helm-chart +type: Opaque +data: + proxy-password: {{ include "sensitiveData" (dict "data" .controllerInfo.proxyPassword "message" "Proxy password is required!") }} +{{ end -}} +{{ end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/values.yaml new file mode 100644 index 00000000..4ff50709 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-auto-instrumentation-agent/values.yaml @@ -0,0 +1,48 @@ +# Docker images +imageInfo: + agentImage: docker.io/appdynamics/auto-instrumentation-agent + agentTag: 24.4.0 + imagePullPolicy: Always + imagePullSecret: null + +#Auto Instrumentation Agent +autoInstrumentationAgent: + agentName: null + labels: {} + nodeSelector: {} + tolerations: [] + resources: + limits: + cpu: "100m" + memory: "300Mi" + requests: + cpu: "50m" + memory: "150Mi" +# AppDynamics controller info (VALUES TO BE PROVIDED BY THE USER) +controllerInfo: + url: "" + account: "" + username: "" + password: "" + accessKey: "" + customSSLCert: null + + # Proxy config + 
authenticateProxy: false + proxyUrl: null + proxyUser: null + proxyPassword: null + +# RBAC config +createServiceAccount: true +agentServiceAccount: appdynamics-auto-instrumentation-agent + +# Instrumentation config +instrumentationConfig: + enabled: false + containerAppCorrelationMethod: proxy + +logProperties: + logFileSizeMb: 5 + logFileBackups: 3 + logLevel: INFO \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/Chart.yaml new file mode 100644 index 00000000..79af63bd --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +appVersion: 24.4.0 +description: Helm Chart to deploy dbcollector as a deployment and corresponding dbconfigs. 
+home: https://appdynamics.com +icon: https://raw.githubusercontent.com/CiscoDevNet/appdynamics-charts/master/logo.png +keywords: +- appdynamics +- database +- dbcollector +- dbconfig +- kubernetes +- monitoring +- pod +- deployment +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-cloud-db-collector +version: 1.9.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/_helper.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/_helper.tpl new file mode 100644 index 00000000..a22e302c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/_helper.tpl @@ -0,0 +1,23 @@ +{{- define "appdynamics-cloud-db-collector.podConfigs" }} + image: {{ .image }} + imagePullPolicy: {{ .imagePullPolicy }} + resources: + {{- toYaml .resources | nindent 4 }} + labels: + {{- toYaml .labels | nindent 4 }} + annotations: + {{- toYaml .annotations | nindent 4 }} + nodeSelector: + {{- toYaml .nodeSelector | nindent 4 }} + imagePullSecrets: + {{- toYaml .imagePullSecrets | nindent 4 }} + affinity: + {{- toYaml .affinity | nindent 4 }} + tolerations: + {{- toYaml .tolerations | nindent 4 }} + securityContext: + {{- toYaml .securityContext | nindent 4 }} + {{ if .priorityClassName -}} + priorityClassName: {{ .priorityClassName }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/common/_common.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/common/_common.tpl new file mode 100644 index 00000000..b2da3ade --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/common/_common.tpl @@ -0,0 +1,11 @@ +{{- define "appdynamics-cloud-db-collector.getClusterId" }} +{{- if .Values.global.smartAgentInstall -}} +{{ "AGENT_PLATFORM_ID_VALUE" }} +{{- else -}} +{{- if (lookup "v1" "Namespace" "" "kube-system").metadata }} +{{- required "Could not fetch kube-system uid to populate clusterID! " (lookup "v1" "Namespace" "" "kube-system").metadata.uid }} +{{- else -}} +{{- .Values.global.clusterId | required "clusterId needs to be specified when kube-system metadata is not accessible!" }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbCollector.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbCollector.yaml new file mode 100644 index 00000000..570d2a35 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbCollector.yaml @@ -0,0 +1,56 @@ +{{ if .Values.install.dbCollector -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +# Kind will match the Kind section in generated CRD at Operator +kind: DbCollector +# Release is picked up from the command +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-db-collector" | trunc 63 }} + namespace: {{ .Release.Namespace }} +spec: + # spec for dbCollector here + {{- $mergedPodConfig := .Values.dbCollectorPod }} + {{- template "appdynamics-cloud-db-collector.podConfigs" $mergedPodConfig }} + {{ with .Values.appdCloudAuth -}} + # CIS credentials + clientId: {{ .clientId }} + # either clientSecret will be present + {{ if .clientSecret -}} + clientSecret: {{ .clientSecret }} + {{ else }} + # if clientSecret is not present then clientSecretKeyRef is expected + 
clientSecretVarSource: {{- toYaml .clientSecretEnvVar.valueFrom | nindent 4 }} + {{- end }} + endpoint: {{ .endpoint }} + tokenUrl: {{ .tokenUrl }} + + {{- end }} + + {{ with .Values.dbCollectorConfig -}} + os: "linux" + arch: "amd64" + # Profiling flag and port + pprofEnable: {{ .pprof.enabled }} + {{ if .pprof.enabled -}} + pprofPort: {{ .pprof.port }} + {{- end }} + # Prometheus flag and port + metricEnable: {{ .metric.enabled }} + {{ if .metric.enabled -}} + metricPort: {{ .metric.port }} + {{- end }} + # Log level + logLevel : {{ .logLevel }} + {{- end }} + {{ if ne .Values.dbCollectorPod.image "appdynamics/appdynamics-cloud-db-collector:23.2.0-539"}} + # Mount path for generated config + configMountPath: /opt/appdynamics/appddbcol/conf/generated + {{ end }} + # To control the Agent Management client start by db collector + agentManagementEnabled: {{ .Values.install.agentManagementEnabled }} + # Name of the running instance of db collector. Optional, should be provided by user to distinguish multiple db collector instance on same platform. + collectorInstanceName: {{.Values.dbCollectorName}} + # Name of the Agent should be fixed to release name. Not modified by end user. + collectorName: {{ .Release.Name }} + clusterName: {{ .Values.global.clusterName }} + clusterID: {{ include "appdynamics-cloud-db-collector.getClusterId" . 
}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbConfig.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbConfig.yaml new file mode 100644 index 00000000..cd781c89 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbConfig.yaml @@ -0,0 +1,61 @@ +{{ if .Values.install.dbMonitoringConfigs -}} +# loop over available dbconfigs to create multiple CRs +{{- range .Values.dbMonitoringConfigs }} +# convert given configname to lower case +{{ $configResName := lower .configName }} +# default dbPasswordSecretName +{{- $dbPasswordSecretName := printf "%s-%s-%s" $.Release.Name $configResName "db-password"}} +--- +apiVersion: cluster.appdynamics.com/v1alpha1 +# Kind will match the Kind section in generated CRD at Operator +kind: DbConfig +# Release is picked up from the command +metadata: + name: {{ printf "%s-%s" $.Release.Name $configResName | trunc 255 }} + namespace: {{ $.Release.Namespace }} +spec: + # spec for dbConfigs here + dbType: {{ .dbType }} + configName: {{ .configName }} + # if collector name is specified, otherwise default to collector deployed via current release + {{ if not .dbCollector }} + dbCollector: + name: {{ printf "%s-%s" $.Release.Name "appdynamics-db-collector" | trunc 63 }} + namespace: {{ $.Release.Namespace }} + {{ else }} + dbCollector: + name: {{ .dbCollector.name }} + namespace: {{ .dbCollector.namespace }} + {{ end }} + hostname: {{ .hostname }} + hostport: {{ .hostport }} + username: {{ .username }} + # if Secret Name containing the password is given + {{ if .passwordSecretName }} + passwordSecretName: {{ .passwordSecretName }} + # otherwise use the created secret + {{ else }} + passwordSecretName: {{ $dbPasswordSecretName}} + {{ end }} + + # database : 
optional field + {{ if .database -}} + database: {{ .database }} + {{ end -}} + # environment : optional field + {{ if .environment -}} + environment: + platform: {{ .environment.platform }} + {{ end }} + # tlsconfig : optional field + {{ if .tlsConfig -}} + tlsConfig: + # hostnameincertificate : optional field + {{ if .tlsConfig.hostNameInCertificate -}} + hostNameInCertificate: {{ .tlsConfig.hostNameInCertificate }} + {{- end }} + certSecretName: {{ .tlsConfig.certSecretName }} + + {{ end }} +{{- end }} +{{ end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbConfigDbPasswordSecrets.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbConfigDbPasswordSecrets.yaml new file mode 100644 index 00000000..4124c03e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/dbConfigDbPasswordSecrets.yaml @@ -0,0 +1,24 @@ +# This manifest creates a default secret for the dbconfig whose secret is passed as a string + +{{ if .Values.install.dbMonitoringConfigs -}} +# loop over available dbconfigs to create multiple CRs +{{- range .Values.dbMonitoringConfigs }} +# convert given configname to lower case +{{ $configResName := lower .configName }} +# default dbSecretName +{{- $dbPasswordSecretName := printf "%s-%s-%s" $.Release.Name $configResName "db-password"}} + +# secret to be created if db passwordSecretName is not provided( provided as a string ) +{{ if not .passwordSecretName }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $dbPasswordSecretName }} + namespace: {{ $.Release.Namespace }} +type: Opaque +stringData: + password: {{ .password }} +{{ end }} +{{- end }} +{{ end -}} diff --git 
a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/rbac/dbcollector_openshift_scc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/rbac/dbcollector_openshift_scc.yaml new file mode 100644 index 00000000..02135354 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/rbac/dbcollector_openshift_scc.yaml @@ -0,0 +1,24 @@ +{{ if or .Values.install.dbCollector .Values.install.dbMonitoringConfigs -}} +{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" -}} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'dbcollector-privileged-scc is a custom SCC for AppDynamics Cloud Database Collector' + name: dbcollector-privileged-scc + namespace: {{ .Release.Namespace }} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: false +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +users: + - system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.dbcollectorServiceAccount }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/rbac/dbcollector_service_account.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/rbac/dbcollector_service_account.yaml new file mode 100644 index 00000000..bc09923e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/templates/rbac/dbcollector_service_account.yaml @@ -0,0 +1,7 @@ +{{ if or 
.Values.install.dbCollector .Values.install.dbMonitoringConfigs -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.dbcollectorServiceAccount }} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/values.schema.json b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/values.schema.json new file mode 100644 index 00000000..e0a21011 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/values.schema.json @@ -0,0 +1,415 @@ +{ + "$schema": "http://json-schema.org/schema#", + "$ref": "#/definitions/Core", + "definitions": { + "Core": { + "type": "object", + "additionalProperties": true, + "properties": { + "dbCollectorName": { + "type": "string" + }, + "appdCloudAuth": { + "$ref": "#/definitions/AppdCloudAuth" + }, + "install": { + "$ref": "#/definitions/Install" + }, + "dbCollectorConfig": { + "$ref": "#/definitions/DBCollectorConfig" + }, + "dbMonitoringConfigs": { + "type": "array", + "items": { + "$ref": "#/definitions/DBMonitoringConfig" + } + }, + "dbCollectorPod": { + "$ref": "#/definitions/DBCollectorPod" + } + }, + "required": [ + "install" + ], + "title": "Core" + }, + "AppdCloudAuth": { + "type": "object", + "additionalProperties": false, + "properties": { + "clientId": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "clientSecretEnvVar": { + "$ref": "#/definitions/ClientSecretEnvVar" + }, + "endpoint": { + "type": "string" + }, + "tokenUrl": { + "type": "string" + } + }, + "anyOf": [ + { "required": + [ "clientSecret" ] + }, + { "required": + [ "clientSecretEnvVar" ] + } + ], + "required": [ + "clientId", + "endpoint", + "tokenUrl" + ], + "title": "AppdCloudAuth" + }, + "ClientSecretEnvVar": { + "type": "object", + "additionalProperties": 
false, + "properties": { + "valueFrom": { + "$ref": "#/definitions/ValueFrom" + } + }, + "required": [ + "valueFrom" + ], + "title": "ClientSecretEnvVar" + }, + "ValueFrom": { + "type": "object", + "additionalProperties": false, + "properties": { + "secretKeyRef": { + "$ref": "#/definitions/SecretKeyRef" + } + }, + "required": [ + "secretKeyRef" + ], + "title": "ValueFrom" + }, + "SecretKeyRef": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "required": [ + "key", + "name" + ], + "title": "SecretKeyRef" + }, + "DBCollectorConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "logLevel": { + "type" : "string", + "enum": ["off", "error", "warn","info","debug","all"] + }, + "pprof": { + "$ref": "#/definitions/Pprof" + }, + "metric": { + "$ref": "#/definitions/metric" + } + }, + "required": [ + "logLevel" + ], + "title": "DBCollectorConfig" + }, + "Pprof": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": ["integer","null"] + } + }, + "required": [ + "enabled" + ], + "title": "Pprof" + }, + "metric": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": ["integer","null"] + } + }, + "required": [ + "enabled" + ], + "title": "metric" + }, + "DBCollectorPod": { + "type": "object", + "additionalProperties": false, + "properties": { + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string" + }, + "priorityClassName": { + "type": ["null","string"] + }, + "resources": { + "$ref": "#/definitions/Resources" + }, + "labels": { + "$ref": "#/definitions/Affinity" + }, + "annotations": { + "$ref": "#/definitions/Affinity" + }, + "nodeSelector": { + "$ref": "#/definitions/Affinity" + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object" + } + 
}, + "affinity": { + "$ref": "#/definitions/Affinity" + }, + "tolerations": { + "type": "array", + "items": { + "type": "object" + } + }, + "securityContext": { + "$ref": "#/definitions/Affinity" + } + }, + "required": [ + "image", + "imagePullPolicy" + ], + "title": "DBCollectorPod" + }, + "Affinity": { + "type": "object", + "additionalProperties": true, + "title": "Affinity" + }, + "Resources": { + "type": "object", + "additionalProperties": false, + "properties": { + "limits": { + "$ref": "#/definitions/Limits" + }, + "requests": { + "$ref": "#/definitions/Limits" + } + }, + "required": [ + "limits", + "requests" + ], + "title": "Resources" + }, + "Limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + }, + "required": [ + "cpu", + "memory" + ], + "title": "Limits" + }, + "DBMonitoringConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "dbType": { + "type": "string", + "enum": ["sqlserver","mysql","mariadb","aurora-mysql","postgres", "aurora-postgresql"] + }, + "configName": { + "type": "string" + }, + "dbCollector": { + "$ref": "#/definitions/DbCollector" + }, + "hostname": { + "type": "string" + }, + "hostport": { + "type": ["null","integer"] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "passwordSecretName": { + "type": "string" + }, + "database": { + "type": ["null","string"] + }, + "tlsConfig": { + "$ref": "#/definitions/TLSConfig" + }, + "environment": { + "$ref": "#/definitions/Environment" + } + }, + "required": [ + "dbType", + "configName", + "hostname", + "hostport", + "username" + ], + "if": { + "properties": { + "dbType": { "enum": ["mysql", "mariadb","aurora-mysql"]} + } + }, + "then": { + "not": { + "required": ["database"] + } + }, + "else" : { + "if": { + "properties": { + "dbType": { "enum": ["postgres", "aurora-postgresql"] } + } + }, + "then": { + "anyOf": [ + { + "not": { 
+ "required": ["tlsConfig"] + } + }, + { + "properties": { + "tlsConfig": { + "not": { + "required": ["hostNameInCertificate"] + } + } + } + } + ] + } + }, + "oneOf": [ + { + "required" : [ + "password" + ] + }, + { + "required" : [ + "passwordSecretName" + ] + } + ], + "title": "DBMonitoringConfig" + }, + "DbCollector": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + }, + "required": [ + "name", + "namespace" + ], + "title": "DbCollector" + }, + "Environment": { + "type": "object", + "additionalProperties": false, + "properties": { + "platform": { + "type": ["null","string"], + "pattern":"^(?i)(azure|aws|self-hosted)$" + } + }, + "title": "Environment" + }, + "TLSConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "hostNameInCertificate": { + "type": ["null","string"] + }, + "certSecretName": { + "type": "string" + } + }, + "required": [ + "certSecretName" + ], + "title": "TLSConfig" + }, + "Install": { + "type": "object", + "additionalProperties": false, + "properties": { + "dbCollector": { + "type": "boolean" + }, + "dbMonitoringConfigs": { + "type": "boolean" + }, + "agentManagementEnabled" : { + "type": "boolean" + } + }, + "required": [ + "dbCollector", + "dbMonitoringConfigs" + ], + "title": "Install" + } + } +} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/values.yaml new file mode 100644 index 00000000..9c9972d5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-db-collector/values.yaml @@ -0,0 +1,78 @@ +appdCloudAuth : +# Example format + clientId: "" + # Either of clientSecret or ClientSecretEnvVar will be present + clientSecret: "" +# clientSecretEnvVar: +# valueFrom: 
+# secretKeyRef: +# name: "" +# key: "" + endpoint: "" + tokenUrl: "" + +install: + dbCollector: false + dbMonitoringConfigs: false + agentManagementEnabled: true + +# Database Collector Instance Name to uniquely identify multiple instance in same platform +dbCollectorName: "" +dbcollectorServiceAccount: appdynamics-dbcollector + +# dbCollector Configs +dbCollectorConfig: + logLevel: info +# Enables profiling, Not intended for cust use + pprof: + enabled : false + port : 9811 +# Enables Metric for prometheus + metric: + enabled : false + port : 7000 + + +# each Object in the list will create a custom resource of dbconnection type +dbMonitoringConfigs: [] +# Example +# - dbType: "" +# configName: "" +# # Optional collector configuration +# dbCollector: +# name: "" +# namespace: "" +# hostname: "" +# hostport: null +# username: "" +# password: "" +# # database is optional except for azure sql database +# database: "" +# # tlsConfig is optional +# tlsConfig: +# # hostNameInCertificate in tlsConfig is optional +# hostNameInCertificate: "" +# certSecretName: "" +# # environment is optional +# environment: +# platform: null + + +dbCollectorPod: + image: appdynamics/appdynamics-cloud-db-collector:24.2.0-1084 + imagePullPolicy: Always + priorityClassName: null + resources: + limits: + cpu: 500m + memory: 1000Mi + requests: + cpu: 200m + memory: 750Mi + labels: {} + annotations: {} + nodeSelector: {} + imagePullSecrets: [] + affinity: {} + tolerations: [] + securityContext: {} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/.helmignore @@ -0,0 +1,23 @@ +# Patterns to 
ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/Chart.yaml new file mode 100644 index 00000000..5ad4d02f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +appVersion: 24.7.0 +description: Helm Chart to deploy clustermon as a deployment and infra manager, containermon, + servermon, log collector as a daemonset. +home: https://appdynamics.com +icon: https://raw.githubusercontent.com/CiscoDevNet/appdynamics-charts/master/logo.png +keywords: +- appdynamics +- cluster +- kubernetes +- monitoring +- pod +- deployment +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-cloud-k8s-monitoring +version: 1.22.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/_helper.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/_helper.tpl new file mode 100644 index 00000000..e36bb8fa --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/_helper.tpl @@ -0,0 +1,539 @@ +{{- define "appdynamics-cloud-k8s-monitoring.sensitiveData" -}} +{{ (get . "data") | trim | b64enc | required (get . 
"message") }} +{{- end -}} + +{{- define "appdynamics-cloud-k8s-monitoring.podConfigs" }} + image: {{ .image }} + imagePullPolicy: {{ .imagePullPolicy }} + resources: + {{- toYaml .resources | nindent 4 }} + labels: + {{- toYaml .labels | nindent 4 }} + annotations: + {{- toYaml .annotations | nindent 4 }} + nodeSelector: + {{- toYaml .nodeSelector | nindent 4 }} + imagePullSecrets: + {{- toYaml .imagePullSecrets | nindent 4 }} + affinity: + {{- toYaml .affinity | nindent 4 }} + tolerations: + {{- toYaml .tolerations | nindent 4 }} + securityContext: + {{- toYaml .securityContext | nindent 4 }} + {{ if .priorityClassName -}} + priorityClassName: {{ .priorityClassName }} + {{- end }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.osNodeSelector" }} +nodeSelector: + kubernetes.io/os: {{ . }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.getClusterName" }} +{{- if .Values.global.smartAgentInstall -}} +{{ "AGENT_PLATFORM_NAME_VALUE" }} +{{- else }} +{{- .Values.global.clusterName | required "clusterName needs to be specified" }} +{{- end }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.getNamespace" }} +{{- if .Values.global.smartAgentInstall -}} +{{- default .Release.Namespace .Values.global.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.getOAuth" }} +oauth: + {{ if .Values.global.smartAgentInstall -}} + # If installation is via the smartagent, use the special purpose value to set what is being used by the smartagent. 
+ clientId: {{ "OAUTH_ID_VALUE" }} + clientSecret: {{ "OAUTH_SECRET_PLAIN_VALUE" }} + endpoint: {{ "SERVICE_DOMAIN_VALUE" }}/data + tokenUrl: {{ "OAUTH_URL_VALUE" }} + {{ else }} + {{ with .Values.global.oauth -}} + # Any one of clientIdEnvVar or clientId is required + {{ if .clientIdEnvVar -}} + clientIdVarSource: {{- toYaml .clientIdEnvVar.valueFrom | nindent 4 }} + {{ else }} + clientId: {{ required "One of clientId or clientIdEnvVar is required when agent management is enabled" .clientId }} + {{- end }} + # Any one of clientSecretEnvVar or clientSecret is required + {{ if .clientSecretEnvVar -}} + clientSecretVarSource: {{- toYaml .clientSecretEnvVar.valueFrom | nindent 4 }} + {{ else }} + clientSecret: {{ required "One of clientSecret or clientSecretEnvVar is required when agent management is enabled" .clientSecret }} + {{- end }} + endpoint: {{ required "endpoint is required" .endpoint }} + tokenUrl: {{ required "tokenUrl is required" .tokenUrl }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.getAgentManagementProxy" }} +agentManagementProxy: + {{ if .Values.global.smartAgentInstall -}} + # If installation is via the smartagent, use the special purpose value to set what is being used by the smartagent. 
+ httpProxy: {{ "AGENT_HTTP_PROXY_VALUE" }} + httpsProxy: {{ "AGENT_HTTPS_PROXY_VALUE" }} + {{ else }} + {{ with .Values.global.agentManagementProxy -}} + httpProxy: {{ .httpProxy }} + httpsProxy: {{ .httpsProxy }} + noProxy: {{- toYaml .noProxy | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.appdLogsCoditionalConfig"}} +conditionalConfigs: + - condition: + or: + - contains: + kubernetes.container.name: logcollector-agent + - contains: + kubernetes.container.name: inframon + - contains: + kubernetes.container.name: clustermon + - contains: + kubernetes.container.name: otc-container + - contains: + kubernetes.pod.name: cloud-operator + - contains: + kubernetes.pod.name: opentelemetry-operator + - contains: + kubernetes.container.name: appdynamics-smartagent + config: + messageParser: + timestamp: + enabled: true + format: ABSOLUTE +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.logCollectorConditionProcessors" }} +{{- if .logFormat}} +- add_fields: + target: appd + fields: + log.format: {{ .logFormat }} +{{- end}} +{{- if .messageParser }} +{{- with .messageParser }} +{{- $numOfDefaultMessageParsersEnabled := 0 }} +{{- $allParsers := (list .logback .log4J .json .grok .infra .multi .timestamp) }} +{{- range $allParsers }} +{{- if . 
}} +{{- if .enabled }} + {{- $numOfDefaultMessageParsersEnabled = add1 $numOfDefaultMessageParsersEnabled }} +{{- end }} +{{- end }} +{{- end }} +{{- if gt $numOfDefaultMessageParsersEnabled 1 }} +{{- fail "More than one \"enabled\" messageParser is not supported" }} +{{- end }} +- add_fields: + target: _message_parser + fields: + {{- if eq $numOfDefaultMessageParsersEnabled 0}} + type: timestamp + format: "ABSOLUTE" + scan: "true" + + {{- else}} + + {{- if .log4J}} + {{- if .log4J.enabled}} + type: log4j + pattern: {{.log4J.pattern | quote}} + {{- end}} + {{- end}} + + {{- if .logback}} + {{- if .logback.enabled}} + type: logback + pattern: {{.logback.pattern | quote}} + {{- end}} + {{- end}} + + {{- if .json}} + {{- if .json.enabled}} + {{- with .json}} + type: json + timestamp_field: {{if .timestampField}}{{.timestampField | quote}}{{end}} + timestamp_pattern: {{ .timestampPattern | default "yyyy-MM-dd HH:mm:ss,SSS" | quote}} + {{if .flattenSep}}flatten_sep: {{.flattenSep | quote}}{{end}} + {{if .fieldsToIgnore}}fields_to_ignore: {{$first := true}}"{{range .fieldsToIgnore}}{{if $first}}{{$first = false}}{{else}},{{end}}{{.}}{{end}}"{{end}} + {{if .fieldsToIgnore}}fields_to_ignore_sep: ","{{end}} + {{if .fieldsToIgnoreRegex}}fields_to_ignore_regex: {{.fieldsToIgnoreRegex | quote}}{{end}} + {{if .maxNumOfFields}}max_num_of_fields: {{.maxNumOfFields}}{{end}} + {{if .maxDepth}}max_depth: {{.maxDepth}}{{end}} + {{- end}} + {{- end}} + {{- end}} + + {{- if .timestamp}} + {{- if .timestamp.enabled}} + type: timestamp + format: {{.timestamp.format | quote}} + scan: "true" + {{- end}} + {{- end}} + + {{- if .grok}} + {{- if .grok.enabled}} + type: grok + pattern: + {{- range .grok.patterns}} + - {{. 
| quote}} + {{- end}} + timestamp_field: {{.grok.timestampField | default ""}} + timestamp_format: {{ .grok.timestampPattern | default "yyyy-MM-dd HH:mm:ss,SSS" | quote}} + {{- end}} + {{- end}} + + {{- if .infra}} + {{- if .infra.enabled}} + type: infra + {{- end}} + {{- end}} + + {{- if .multi}} + {{- if .multi.enabled}} + type: multi + parsers: {{.multi.parsers | quote}} + {{- end}} + {{- end}} + + {{- if .grok}} + {{- if and .grok.enabled .subparsers}} + subparsers: {{.subparsers | quote}} + {{- end}} + {{- end}} + {{- end}} +{{- end }} +{{- else }} +- add_fields: + target: _message_parser + fields: + type: timestamp + format: "ABSOLUTE" + scan: "true" +{{- end }} +{{- end }} + +{{- define "appdynamics-cloud-k8s-monitoring.filebeatYml" }} +{{- $clusterName := "" }} +{{- if .Values.global.smartAgentInstall }} +# If installation is via the smartagent, use the special purpose value to set what is being used by the smartagent. +{{- $clusterName = "AGENT_PLATFORM_NAME_VALUE" }} +{{- else }} +{{- $clusterName = .Values.global.clusterName | required "clusterName needs to be specified" }} +{{- end }} +{{- $clusterId := "" }} +{{- $clusterId = include "appdynamics-cloud-k8s-monitoring.getClusterId" . 
}} +{{- $osVal := .osVal }} +{{- $linux := "linux" }} +{{- $windows := "windows" }} +{{- $container:= deepCopy .Values.logCollectorConfig.container}} +{{- $osContainer:= index .Values.logCollectorConfig.env $osVal "container"}} +{{- $containerConfig:= mustMergeOverwrite $container $osContainer}} +{{- if and ($osContainer) (index $container "defaultConfig") }} +{{- $osMessageParser:= index $osContainer "defaultConfig" "messageParser"}} +{{- $_:= set $containerConfig.defaultConfig "messageParser" (coalesce $osMessageParser $containerConfig.defaultConfig.messageParser) }} +{{- end }} +{{- if ( $containerConfig.monitorCollectors)}} +{{- $_:=set $containerConfig "conditionalConfigs" (concat $containerConfig.conditionalConfigs (include "appdynamics-cloud-k8s-monitoring.appdLogsCoditionalConfig" .| fromYaml).conditionalConfigs) }} +{{- end }} +filebeat.autodiscover: + providers: + - type: kubernetes + node: ${NODE_NAME} + labels.dedot: false + annotations.dedot: false + hints.enabled: true + {{- if $containerConfig.defaultConfig.enabled}} + {{- with $containerConfig.defaultConfig}} + hints.default_config: + enabled: true + type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + {{- if eq $osVal $linux }} + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + {{- end }} + {{- if eq $osVal $windows }} + - C:/var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + {{- end }} + parsers: + - container: + stream: all + format: auto + {{- if .multiLinePattern}} + - multiline: + type: pattern + pattern: {{.multiLinePattern | quote}} + {{- if .multiLineNegate}} + negate: {{.multiLineNegate}} + {{- end}} + match: {{ required "\"multiLineMatch\" field is mandatory, if \"multiLinePattern\" is set." 
.multiLineMatch }} + {{- end}} + prospector.scanner.symlinks: true + prospector.scanner.exclude_files : [".*(((log)-(collector))|(inframon)|(clustermon)|((otel)-(collect))|((cloud)-(operator))|((opentelemetry)-(operator))|((kube)-)|((fso)-(agent)-(mgmt)))+.*log"] + processors: + {{- include "appdynamics-cloud-k8s-monitoring.logCollectorConditionProcessors" . | nindent 10 }} + {{- end}} + {{- else}} + hints.default_config.enabled: false + {{- end }} + templates: + {{- range $containerConfig.conditionalConfigs}} + - condition: + {{- if .condition}} + {{- if .condition.operator}} + {{.condition.operator}}: + {{.condition.key}}: {{.condition.value}} + {{- else}} + {{ .condition | toYaml | indent 14 | trim }} + {{- end}} + {{- end}} + config: + {{- if .config}} + {{- with .config}} + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + {{- if eq $osVal $linux }} + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + {{- end }} + {{- if eq $osVal $windows }} + - C:/var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + {{- end}} + parsers: + - container: + stream: all + format: auto + {{- if .multiLinePattern}} + - multiline: + type: pattern + pattern: {{.multiLinePattern | quote}} + {{- if .multiLineNegate}} + negate: {{.multiLineNegate}} + {{- end}} + match: {{ required "\"multiLineMatch\" field is mandatory, if \"multiLinePattern\" is set." .multiLineMatch }} + {{- end}} + prospector.scanner.symlinks: true + processors: + {{- include "appdynamics-cloud-k8s-monitoring.logCollectorConditionProcessors" . 
| nindent 16 }} + {{- end}} + {{- end}} + {{- end}} +{{- with $containerConfig }} +processors: + - add_cloud_metadata: ~ + - add_kubernetes_metadata: + in_cluster: true + host: ${NODE_NAME} + matchers: + - logs_path: + {{- if eq $osVal $linux }} + logs_path: "/var/log/containers/" + {{- end }} + {{- if eq $osVal $windows }} + logs_path: "C:/ProgramData/docker/containers/" + {{- end }} + - copy_fields: + fields: + - from: "kubernetes.deployment.name" + to: "kubernetes.workload.name" + - from: "kubernetes.daemonset.name" + to: "kubernetes.workload.name" + - from: "kubernetes.statefulset.name" + to: "kubernetes.workload.name" + - from: "kubernetes.replicaset.name" + to: "kubernetes.workload.name" + - from: "kubernetes.cronjob.name" + to: "kubernetes.workload.name" + - from: "kubernetes.job.name" + to: "kubernetes.workload.name" + fail_on_error: false + ignore_missing: true + {{- if .excludeCondition}} + - drop_event: + when: + {{ .excludeCondition | toYaml | indent 10 | trim }} + {{- end}} + - rename: + fields: + - from: "kubernetes.namespace" + to: "kubernetes.namespace.name" + - from: "kubernetes" + to: "k8s" + - from: k8s.annotations.appdynamics.lca/filebeat.parser + to: "_message_parser" + - from: "cloud.instance.id" + to: "host.id" + - from: "k8s.container.name" + to: "container.name" + ignore_missing: true + fail_on_error: false + - drop_fields: + fields: ["agent", "stream", "ecs", "input", "orchestrator", "k8s.annotations.appdynamics", "k8s.labels", "k8s.node.labels", "cloud"] + ignore_missing: true + - script: + lang: javascript + source: > + function process(event) { + var podUID = event.Get("k8s.pod.uid"); + if (podUID) { + event.Put("internal.container.encapsulating_object_id", "{{ $clusterId }}:" + podUID); + } + return event; + } + {{- if .dropFields}} + - drop_fields: + fields: [{{range .dropFields}}{{. 
| quote}}, {{end}}] + ignore_missing: true + {{- end}} + - dissect: + tokenizer: "%{name}:%{tag}" + field: "container.image.name" + target_prefix: "container.image" + ignore_failure: true + overwrite_keys: true + - add_fields: + target: k8s + fields: + cluster.name: {{ $clusterName }} + cluster.id: {{ $clusterId }} + - add_fields: + target: telemetry + fields: + sdk.name: log-agent + - add_fields: + target: os + fields: + type: {{ $osVal }} +output.otlploggrpc: + groupby_resource_fields: + - k8s + - source + - host + - container + - log + - telemetry + - internal + - os + # using the separate LCA logs pipeline's OTLP GRPC receiver port (14317) + hosts: ["${APPD_OTELCOL_GRPC_RECEIVER_HOST}:14317"] + worker: {{.worker}} + max_bytes: {{.maxBytes}} + #hosts: ["otel-collector-local-service.appdynamics.svc.cluster.local:8080"] + {{- with $.Values.global.tls.appdCollectors }} + ssl.enabled: {{.enabled}} + ssl.supported_protocols: [TLSv1.3] + {{- if .enabled}} + {{- if eq $osVal $linux }} + ssl.certificate_authorities: ["/opt/appdynamics/certs/ca/ca.pem"] + ssl.certificate: "/opt/appdynamics/certs/client/client.pem" + ssl.key: "/opt/appdynamics/certs/client/client-key.pem" + {{- end }} + {{- if eq $osVal $windows }} + ssl.certificate_authorities: ["C:/filebeat/certs/ca/ca.pem"] + ssl.certificate: "C:/filebeat/certs/client/client.pem" + ssl.key: "C:/filebeat/certs/client/client-key.pem" + {{- end }} + {{- end}} + {{- end}} + wait_for_ready: true + batch_size: {{.batchSize}} + summary_debug_logs_interval: {{.summaryDebugLogsInterval}} +filebeat.registry.path: registry1 +filebeat.registry.file_permissions: 0640 +{{- if eq $osVal $linux }} +path.data: /opt/appdynamics/logcollector-agent/data +{{- end }} +{{- if eq $osVal $windows }} +path.data: C:/ProgramData/filebeat/data +{{- end}} +{{- with .logging}} +logging: + level: {{.level}} + {{- with .files}} + to_files: {{.enabled}} + files: + {{- if eq $osVal $linux }} + path: /opt/appdynamics/logcollector-agent/log + {{- end }} + 
{{- if eq $osVal $windows }} + path: C:/ProgramData/filebeat/log + {{- end }} + name: lca-log + keepfiles: {{.keepFiles}} + permissions: 0640 + {{- end}} + selectors: [{{if .metrics.enabled}}monitoring,{{end}}{{range .selectors}}{{.}},{{end}}] + {{- with .metrics}} + metrics: + enabled: {{.enabled}} + period: {{.period}} + {{- end}} +{{- end}} +{{- with .monitoring}} +monitoring: + enabled: {{if $.Values.agentManagementEnabled.logCollector}}{{.otlpmetric.enabled}}{{else}}false{{end}} + {{- if .otlpmetric.enabled}} + {{- with .otlpmetric}} + otlpmetric: + endpoint: {{.endpoint}} + protocol: {{.protocol}} + collect_period: {{.collectPeriod}} + report_period: {{.reportPeriod}} + resource_attributes: + {{- range .resourceAttrs}} + {{.key}}: {{.value | quote}} + {{- end}} + {{- if (gt (len .metrics) 0)}} + metrics: + {{- range .metrics}} + - {{.}} + {{- end}} + {{- end}} + {{- if .retry.enabled}} + {{- with .retry}} + retry: + enabled: {{.enabled}} + initial_interval: {{.initialInterval}} + max_interval: {{.maxInterval}} + max_elapsed_time: {{.maxElapsedTime}} + {{- end}} + {{- end}} + {{- with $.Values.global.tls.appdCollectors }} + ssl.enabled: {{.enabled}} + ssl.supported_protocols: [TLSv1.3] + {{- if .enabled}} + {{- if eq $osVal $linux }} + ssl.certificate_authorities: ["/opt/appdynamics/certs/ca/ca.pem"] + ssl.certificate: "/opt/appdynamics/certs/client/client.pem" + ssl.key: "/opt/appdynamics/certs/client/client-key.pem" + {{- end }} + {{- if eq $osVal $windows }} + ssl.certificate_authorities: ["C:/filebeat/certs/ca/ca.pem"] + ssl.certificate: "C:/filebeat/certs/client/client.pem" + ssl.key: "C:/filebeat/certs/client/client-key.pem" + {{- end }} + {{- end}} + {{- end}} + {{- end}} + {{- end}} +{{- end}} +{{- end}} +{{- end}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/clustermon.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/clustermon.yaml new file mode 100644 index 00000000..4221c526 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/clustermon.yaml @@ -0,0 +1,107 @@ +{{ if .Values.install.clustermon -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Clustermon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-clustermon" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +spec: + {{- $mergedPodConfig := .Values.clustermonPod }} + {{- if (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") }} + {{- if ne (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") .Values.clustermonConfig.os }} + {{- fail "Invalid node selector" }} + {{- end }} + {{- else }} + {{- $mergedPodConfig := mustMergeOverwrite (include "appdynamics-cloud-k8s-monitoring.osNodeSelector" .Values.clustermonConfig.os | fromYaml ) $mergedPodConfig }} + {{- end }} + {{- template "appdynamics-cloud-k8s-monitoring.podConfigs" $mergedPodConfig }} + + serviceAccountName: {{ .Values.clustermonServiceAccount}} + + {{ with .Values.global.tls.appdCollectors -}} + mTLS: + enabled: {{ .enabled | quote }} + {{ if .enabled -}} + secretName: {{ required "A valid secret name entry required!" .secret.secretName }} + secretKeys: + caCert: {{ .secret.secretKeys.caCert }} + tlsCert: {{ .secret.secretKeys.tlsCert }} + tlsKey: {{ .secret.secretKeys.tlsKey }} + {{- end }} + {{- end }} + + agentManagementEnabled: {{ .Values.agentManagementEnabled.clustermon | quote }} + {{ if .Values.agentManagementEnabled.clustermon -}} + {{- include "appdynamics-cloud-k8s-monitoring.getOAuth" . | nindent 2 }} + {{- include "appdynamics-cloud-k8s-monitoring.getAgentManagementProxy" . 
| nindent 2 }} + {{- end }} + collectorName: {{ .Release.Name }} + + clusterName: {{ include "appdynamics-cloud-k8s-monitoring.getClusterName" . }} + helmChartVersion: {{ .Values.global.helmChartVersion }} + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . }} + {{ with .Values.clustermonConfig -}} + os: {{ .os }} + logLevel: {{ .logLevel }} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + printToStdout: {{ .printToStdout | quote }} + {{ if .gatherInterval -}} + gatherInterval: {{ .gatherInterval }} + {{- end }} + {{ if .maxGoRoutine -}} + maxGoRoutine: {{ .maxGoRoutine }} + {{- end }} + + filters: + namespace: + {{- toYaml .filters.namespace | nindent 6 }} + entity: + {{- toYaml .filters.entity | nindent 6 }} + label: + {{- toYaml .filters.label | nindent 6 }} + annotation: + {{- toYaml .filters.annotation | nindent 6}} + scopedFilters: + {{- toYaml .filters.scopedFilters | nindent 6}} + + {{ if .testSetupEnabled -}} + testSetupEnabled: {{ .testSetupEnabled | quote }} + {{- end }} + {{ if .optimisedPayloadInterval -}} + optimisedPayloadInterval: {{ .optimisedPayloadInterval }} + {{- end }} + {{ if .events -}} + events: + enabled: {{ .events.enabled | quote }} + severityToExclude: + {{- toYaml .events.severityToExclude | nindent 6 }} + reasonToExclude: + {{- toYaml .events.reasonToExclude | nindent 6 }} + severeGroupByReason: + {{- toYaml .events.severeGroupByReason | nindent 6 }} + {{- end }} + + labelsIngestionEnabled: {{ .labels.enabled }} + configurationEnabled: {{ .configurations.enabled | quote }} + hpaVpaEnabled: {{ .autoscalers.hpaVpaEnabled | quote }} + + {{ if .profiler -}} + profiler: + enabled: {{ .profiler.enabled | quote }} + port: {{ .profiler.port }} + {{- end }} + + + ingressControllers: + {{- toYaml .ingressControllers | nindent 4 }} + {{- end }} + + {{ with .Values.infraManagerConfig -}} + infraManagerConfig: + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ 
.logFilesNumBackups }} + printToStdout: {{ .printToStdout | quote }} + logLevel: {{ .logLevel }} + {{- end }} + +{{ end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/common/_common.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/common/_common.tpl new file mode 100644 index 00000000..261a1f2e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/common/_common.tpl @@ -0,0 +1,12 @@ +{{- define "appdynamics-cloud-k8s-monitoring.getClusterId" }} +{{- if .Values.global.smartAgentInstall -}} +{{ "AGENT_PLATFORM_ID_VALUE" }} +{{- else -}} +{{- if (lookup "v1" "Namespace" "" "kube-system").metadata }} +{{- required "Could not fetch kube-system uid to populate clusterID! " (lookup "v1" "Namespace" "" "kube-system").metadata.uid }} +{{- else -}} +{{- .Values.global.clusterId | required "clusterId needs to be specified when kube-system metadata is not accessible!" }} +{{- end }} +{{- end }} +{{- end }} + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/containermon.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/containermon.yaml new file mode 100644 index 00000000..e0dc37de --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/containermon.yaml @@ -0,0 +1,46 @@ +{{ if and .Values.install.defaultInfraCollectors (has "linux" .Values.containermonConfig.os) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Containermon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-containermon" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +spec: + os: linux + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . }} + {{$containermonConfig := .Values.containermonConfig}} + {{ if (.Values.containermonConfig.env).linux -}} + {{$containermonConfig = mustMergeOverwrite .Values.containermonConfig .Values.containermonConfig.env.linux}} + {{- end }} + {{ with $containermonConfig -}} + {{ if .gatherInterval -}} + gatherInterval: {{ .gatherInterval }} + {{- end }} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + logLevel: {{ .logLevel }} + {{- end }} +{{- end }} +--- +{{ if and .Values.install.defaultInfraCollectors (has "windows" .Values.containermonConfig.os) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Containermon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-containermon-windows" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +spec: + os: windows + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . 
}} + {{$containermonConfig := .Values.containermonConfig}} + {{ if (.Values.containermonConfig.env).windows -}} + {{$containermonConfig = mustMergeOverwrite .Values.containermonConfig .Values.containermonConfig.env.windows}} + {{- end }} + {{ with $containermonConfig -}} + {{ if .gatherInterval -}} + gatherInterval: {{ .gatherInterval }} + {{- end }} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + logLevel: {{ .logLevel }} + {{- end }} +{{- end }} + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/inframon.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/inframon.yaml new file mode 100644 index 00000000..a61af2f5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/inframon.yaml @@ -0,0 +1,107 @@ +{{ if and .Values.install.defaultInfraCollectors (or (has "linux" .Values.containermonConfig.os) (has "linux" .Values.servermonConfig.os)) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Inframon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-inframon" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +spec: + {{- $mergedPodConfig := (mustMergeOverwrite .Values.inframonPod .Values.inframonPod.env.linux) }} + {{- if (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") }} + {{- if ne (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") "linux" }} + {{- fail "Invalid node selector" }} + {{- end }} + {{- else }} + {{- $mergedPodConfig := mustMergeOverwrite (include "appdynamics-cloud-k8s-monitoring.osNodeSelector" "linux" | fromYaml ) $mergedPodConfig }} + {{- end }} + {{- template "appdynamics-cloud-k8s-monitoring.podConfigs" $mergedPodConfig }} + os: linux + serviceAccountName: {{ .Values.inframonServiceAccount}} + + {{ with .Values.global.tls.appdCollectors -}} + mTLS: + enabled: {{ .enabled | quote }} + {{ if .enabled -}} + secretName: {{ required "A valid secret name entry required!" .secret.secretName }} + secretKeys: + caCert: {{ .secret.secretKeys.caCert }} + tlsCert: {{ .secret.secretKeys.tlsCert }} + tlsKey: {{ .secret.secretKeys.tlsKey }} + {{- end }} + {{- end }} + + agentManagementEnabled: {{ .Values.agentManagementEnabled.defaultInfraCollectors | quote }} + {{ if .Values.agentManagementEnabled.defaultInfraCollectors -}} + collectorName: {{ .Release.Name }} + clusterName: {{ include "appdynamics-cloud-k8s-monitoring.getClusterName" . }} + helmChartVersion: {{ .Values.global.helmChartVersion }} + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . }} + {{- include "appdynamics-cloud-k8s-monitoring.getOAuth" . | nindent 2 }} + {{- include "appdynamics-cloud-k8s-monitoring.getAgentManagementProxy" . 
| nindent 2 }} + {{- end }} + + {{ with .Values.infraManagerConfig -}} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + printToStdout: {{ .printToStdout | quote }} + logLevel: {{ .logLevel }} + {{- end }} +{{- end }} +--- +{{ if and .Values.install.defaultInfraCollectors (or (has "windows" .Values.containermonConfig.os) (has "windows" .Values.servermonConfig.os)) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Inframon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-inframon-windows" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +spec: + {{- $mergedPodConfig := (mustMergeOverwrite .Values.inframonPod .Values.inframonPod.env.windows) }} + {{- if (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") }} + {{- if ne (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") "windows" }} + {{- fail "Invalid node selector" }} + {{- end }} + {{- else }} + {{- $mergedPodConfig := mustMergeOverwrite (include "appdynamics-cloud-k8s-monitoring.osNodeSelector" "windows" | fromYaml ) $mergedPodConfig }} + {{- end }} + {{- template "appdynamics-cloud-k8s-monitoring.podConfigs" $mergedPodConfig }} + + os: windows + serviceAccountName: {{ .Values.inframonServiceAccount}} + + {{ with .Values.global.tls.appdCollectors -}} + mTLS: + enabled: {{ .enabled | quote }} + {{ if .enabled -}} + secretName: {{ required "A valid secret name entry required!" .secret.secretName }} + secretKeys: + caCert: {{ .secret.secretKeys.caCert }} + tlsCert: {{ .secret.secretKeys.tlsCert }} + tlsKey: {{ .secret.secretKeys.tlsKey }} + {{- end }} + {{- end }} + + agentManagementEnabled: {{ .Values.agentManagementEnabled.defaultInfraCollectors | quote }} + {{ if .Values.agentManagementEnabled.defaultInfraCollectors -}} + collectorName: {{ .Release.Name }} + clusterName: {{ include "appdynamics-cloud-k8s-monitoring.getClusterName" . 
}} + helmChartVersion: {{ .Values.global.helmChartVersion }} + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . }} + {{- include "appdynamics-cloud-k8s-monitoring.getOAuth" . | nindent 2 }} + {{- include "appdynamics-cloud-k8s-monitoring.getAgentManagementProxy" . | nindent 2 }} + {{- end }} + + {{ with .Values.infraManagerConfig -}} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + printToStdout: {{ .printToStdout | quote }} + logLevel: {{ .logLevel }} + {{- end }} + + windowsExporter: + {{- $mergedPodConfig := .Values.windowsExporterPod }} + {{- if (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") }} + {{- if ne (index $mergedPodConfig "nodeSelector" "kubernetes.io/os") "windows" }} + {{- fail "Invalid node selector" }} + {{- end }} + {{- end }} + {{- include "appdynamics-cloud-k8s-monitoring.podConfigs" $mergedPodConfig | nindent 2 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/logCollector.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/logCollector.yaml new file mode 100644 index 00000000..7e2e8a3f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/logCollector.yaml @@ -0,0 +1,87 @@ +{{$linux :="linux"}} +{{$windows :="windows"}} +{{ if and (.Values.install.logCollector) (has $linux .Values.logCollectorConfig.os) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: LogCollector +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-log-collector" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +spec: + {{- template "appdynamics-cloud-k8s-monitoring.podConfigs" (mustMergeOverwrite .Values.logCollectorPod .Values.logCollectorPod.env.linux) }} + os: {{$linux}} + serviceAccountName: {{ .Values.logCollectorServiceAccount}} + rollingUpdateMaxUnavailable: {{ .Values.logCollectorPod.rollingUpdateMaxUnavailable}} + + {{ with .Values.global.tls.appdCollectors -}} + mTLS: + enabled: {{ .enabled | quote }} + {{ if .enabled -}} + secretName: {{ required "A valid secret name entry required!" .secret.secretName }} + secretKeys: + caCert: {{ .secret.secretKeys.caCert }} + tlsCert: {{ .secret.secretKeys.tlsCert }} + tlsKey: {{ .secret.secretKeys.tlsKey }} + {{- end }} + {{- end }} + + {{ if (coalesce .Values.logCollectorConfig.env.linux.filebeatYaml .Values.logCollectorConfig.filebeatYaml ) -}} + filebeatYaml: {{ (coalesce .Values.logCollectorConfig.env.linux.filebeatYaml .Values.logCollectorConfig.filebeatYaml ) | quote }} + {{ else -}} + {{$data := dict "osVal" $linux "Values" .Values }} + filebeatYaml: |- + {{- include "appdynamics-cloud-k8s-monitoring.filebeatYml" $data | nindent 8}} + {{- end }} + + agentManagementEnabled: {{ .Values.agentManagementEnabled.logCollector | quote }} + {{ if .Values.agentManagementEnabled.logCollector -}} + collectorName: {{ .Release.Name }} + clusterName: {{ include "appdynamics-cloud-k8s-monitoring.getClusterName" . }} + helmChartVersion: {{ .Values.global.helmChartVersion }} + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . }} + {{- include "appdynamics-cloud-k8s-monitoring.getOAuth" . | nindent 2 }} + {{- include "appdynamics-cloud-k8s-monitoring.getAgentManagementProxy" . 
| nindent 2 }} + {{- end }} +{{- end }} +--- +{{ if and (.Values.install.logCollector) (has $windows .Values.logCollectorConfig.os) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: LogCollector +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-log-collector-windows" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +spec: + {{- template "appdynamics-cloud-k8s-monitoring.podConfigs" (mustMergeOverwrite .Values.logCollectorPod .Values.logCollectorPod.env.windows) }} + os: {{$windows}} + serviceAccountName: {{ .Values.logCollectorServiceAccount}} + + {{ with .Values.global.tls.appdCollectors -}} + mTLS: + enabled: {{ .enabled | quote }} + {{ if .enabled -}} + secretName: {{ required "A valid secret name entry required!" .secret.secretName }} + secretKeys: + caCert: {{ .secret.secretKeys.caCert }} + tlsCert: {{ .secret.secretKeys.tlsCert }} + tlsKey: {{ .secret.secretKeys.tlsKey }} + {{- end }} + {{- end }} + + {{ if (coalesce .Values.logCollectorConfig.env.windows.filebeatYaml .Values.logCollectorConfig.filebeatYaml ) -}} + filebeatYaml: |- + {{- (coalesce .Values.logCollectorConfig.env.windows.filebeatYaml .Values.logCollectorConfig.filebeatYaml ) | nindent 8 }} + {{ else -}} + {{$data := dict "osVal" $windows "Values" .Values }} + filebeatYaml: |- + {{- include "appdynamics-cloud-k8s-monitoring.filebeatYml" $data | nindent 8}} # appdynamics-cloud-k8s-monitoring.filebeatYml change this when simplified/default config + {{- end }} + + agentManagementEnabled: {{ .Values.agentManagementEnabled.logCollector | quote }} + {{ if .Values.agentManagementEnabled.logCollector -}} + collectorName: {{ .Release.Name }} + clusterName: {{ include "appdynamics-cloud-k8s-monitoring.getClusterName" . }} + helmChartVersion: {{ .Values.global.helmChartVersion }} + clusterID: {{ include "appdynamics-cloud-k8s-monitoring.getClusterId" . }} + {{- include "appdynamics-cloud-k8s-monitoring.getOAuth" . 
| nindent 2 }} + {{- include "appdynamics-cloud-k8s-monitoring.getAgentManagementProxy" . | nindent 2 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_clusterrole.yaml new file mode 100644 index 00000000..66bec137 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_clusterrole.yaml @@ -0,0 +1,73 @@ +{{ if .Values.install.clustermon -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: appdynamics-clustermon-clusterrole +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - events + - namespaces + - nodes + - nodes/proxy + - pods + - resourcequotas + - persistentvolumeclaims + - persistentvolumes + - replicationcontrollers +{{- if .Values.clustermonConfig.configurations.enabled }} + - configmaps + - secrets +{{- end }} + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - autoscaling.k8s.io + resources: + - verticalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_clusterrole_binding.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_clusterrole_binding.yaml new file mode 100644 index 00000000..35aa516a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_clusterrole_binding.yaml @@ -0,0 +1,14 @@ +{{ if .Values.install.clustermon -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: appdynamics-clustermon-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: appdynamics-clustermon-clusterrole +subjects: + - kind: ServiceAccount + name: {{ .Values.clustermonServiceAccount }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_openshift_scc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_openshift_scc.yaml new file mode 100644 index 00000000..55cb71b8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_openshift_scc.yaml @@ -0,0 +1,24 @@ +{{ if .Values.install.clustermon -}} +{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" -}} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'clustermon-privileged-scc is a custom SCC for AppDynamics Cloud Cluster Collector' + name: clustermon-privileged-scc + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: false +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +users: + - system:serviceaccount:{{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }}:{{ .Values.clustermonServiceAccount }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_service_account.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_service_account.yaml new file mode 100644 index 00000000..25b64e39 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/clustermon_service_account.yaml @@ -0,0 +1,7 @@ +{{ if .Values.install.clustermon -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.clustermonServiceAccount }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_clusterrole.yaml new file mode 100644 index 00000000..3ecff48c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_clusterrole.yaml @@ -0,0 +1,22 @@ +{{ if .Values.install.defaultInfraCollectors -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: appdynamics-inframon-clusterrole +rules: +- apiGroups: + - "" + resources: + - nodes + - nodes/proxy + verbs: + - get +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_clusterrole_binding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_clusterrole_binding.yaml new file mode 100644 index 00000000..117e7051 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_clusterrole_binding.yaml @@ -0,0 +1,14 @@ +{{ if .Values.install.defaultInfraCollectors -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: appdynamics-inframon-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: appdynamics-inframon-clusterrole +subjects: + - kind: ServiceAccount + name: {{ .Values.inframonServiceAccount }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_openshift_scc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_openshift_scc.yaml new file mode 100644 index 00000000..1105d524 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_openshift_scc.yaml @@ -0,0 +1,24 @@ +{{ if .Values.install.defaultInfraCollectors -}} +{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" -}} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'inframon-privileged-scc is a custom SCC for AppDynamics Cloud Infrastructure Collector' + name: inframon-privileged-scc + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: false +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +users: + - system:serviceaccount:{{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}}:{{ .Values.inframonServiceAccount }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_service_account.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_service_account.yaml new file mode 100644 index 00000000..bb6a0ac2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/inframon_service_account.yaml @@ -0,0 +1,7 @@ +{{ if .Values.install.defaultInfraCollectors -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.inframonServiceAccount }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_clusterrole.yaml new file mode 100644 index 00000000..7dd13fce --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_clusterrole.yaml @@ -0,0 +1,33 @@ +{{ if .Values.install.logCollector -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: appdynamics-logcollector-clusterrole +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + - nodes + verbs: + - get + - watch + - list +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - watch + - list +{{- end }} \ No newline at end of file diff --git 
a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_clusterrole_binding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_clusterrole_binding.yaml new file mode 100644 index 00000000..90246d79 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_clusterrole_binding.yaml @@ -0,0 +1,14 @@ +{{ if .Values.install.logCollector -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: appdynamics-logcollector-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: appdynamics-logcollector-clusterrole +subjects: + - kind: ServiceAccount + name: {{ .Values.logCollectorServiceAccount }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_openshift_scc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_openshift_scc.yaml new file mode 100644 index 00000000..cfb58b39 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_openshift_scc.yaml @@ -0,0 +1,32 @@ +{{ if .Values.install.logCollector -}} +{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" -}} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: 'logcollector-privileged-scc is a custom SCC for AppDynamics Cloud Log Collector' + name: logcollector-privileged-scc + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: false +allowPrivilegedContainer: false +readOnlyRootFilesystem: true +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +users: + - system:serviceaccount:{{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}}:{{ .Values.logCollectorServiceAccount }} +volumes: + - configMap + - hostPath # LCA uses hostPath for config and registry files + - secret + - emptyDir + - downwardAPI + - persistentVolumeClaim + - projected +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_service_account.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_service_account.yaml new file mode 100644 index 00000000..d917f7ba --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/rbac/logcollector_service_account.yaml @@ -0,0 +1,7 @@ +{{ if .Values.install.logCollector -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.logCollectorServiceAccount }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/servermon.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/servermon.yaml new file mode 100644 index 00000000..591b25e2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/templates/servermon.yaml @@ -0,0 +1,61 @@ +{{ if and .Values.install.defaultInfraCollectors (has "linux" .Values.servermonConfig.os) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Servermon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-servermon" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +spec: + os: linux + {{$servermonConfig := .Values.servermonConfig}} + {{ if (.Values.servermonConfig.env).linux -}} + {{$servermonConfig = mustMergeOverwrite .Values.servermonConfig .Values.servermonConfig.env.linux}} + {{- end }} + {{ with $servermonConfig -}} + {{ if .gatherInterval -}} + gatherInterval: {{ .gatherInterval }} + {{- end }} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + logLevel: {{ .logLevel }} + + {{ if .skipProviderCheck -}} + skipProviderCheck: {{ .skipProviderCheck | quote }} + {{- end }} + + {{ if .exporterPort -}} + exporterPort: {{ .exporterPort }} + {{- end }} + + {{- end }} +{{- end }} +--- +{{ if and .Values.install.defaultInfraCollectors (has "windows" .Values.servermonConfig.os) -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Servermon +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-servermon-windows" | trunc 63 }} + namespace: {{ include "appdynamics-cloud-k8s-monitoring.getNamespace" . 
}} +spec: + os: windows + {{$servermonConfig := .Values.servermonConfig}} + {{ if (.Values.servermonConfig.env).windows -}} + {{$servermonConfig = mustMergeOverwrite .Values.servermonConfig .Values.servermonConfig.env.windows}} + {{- end }} + {{ with $servermonConfig -}} + {{ if .gatherInterval -}} + gatherInterval: {{ .gatherInterval }} + {{- end }} + logFilesMaxSizeMb: {{ .logFilesMaxSizeMb }} + logFilesNumBackups: {{ .logFilesNumBackups }} + logLevel: {{ .logLevel }} + + {{ if .skipProviderCheck -}} + skipProviderCheck: {{ .skipProviderCheck | quote }} + {{- end }} + + {{ if .exporterPort -}} + exporterPort: {{ .exporterPort }} + {{- end }} + + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/values.schema.json b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/values.schema.json new file mode 100644 index 00000000..94d93d6f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/values.schema.json @@ -0,0 +1,1499 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "title": "Values", + "additionalProperties": false, + "properties": { + "global": { + "type": "object", + "additionalProperties": false, + "properties": { + "smartAgentInstall": { + "description": "enabled when installed via smartagent. 
Installation takes all the common values used by the smartagent.", + "type": "boolean" + }, + "namespace": { + "description": "namespace to use when installed via smartagent", + "type": "string" + }, + "helmChartVersion": { + "description": "version of helm chart", + "type": "string" + }, + "clusterName": { + "description": "name of cluster", + "type": "string" + }, + "clusterId": { + "description": "The uid of kube-system namespace, required when helm lookup is not supported", + "type": "string" + }, + "oauth": { + "type": "object", + "properties": { + "clientId": { + "type": "string", + "description": "AppDynamics oauth2 client id" + }, + "clientSecret": { + "type": "string", + "description": "AppDynamics oauth2 client secret plain text" + }, + "clientIdEnvVar": { + "type": "object", + "additionalProperties": false, + "description": "The clientIdEnvVar Schema", + "properties": { + "valueFrom": { + "type": "object", + "properties": { + "secretKeyRef": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "key": { + "type": "string" + } + } + } + } + } + } + }, + "clientSecretEnvVar": { + "type": "object", + "description": "The clientSecretEnvVar Schema", + "properties": { + "valueFrom": { + "type": "object", + "properties": { + "secretKeyRef": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "key": { + "type": "string" + } + } + } + } + } + } + }, + "endpoint": { + "type": "string", + "description": "AppDynamics endpoint url" + }, + "tokenUrl": { + "type": "string", + "description": "AppDynamics oauth2 token refresh url" + } + }, + "allOf": [ + { + "anyOf": [ + { + "required": ["clientSecret"] + }, + { + "required": ["clientSecretEnvVar"] + } + ] + }, + { + "anyOf": [ + { + "required": ["clientId"] + }, + { + "required": ["clientIdEnvVar"] + } + ] + } + ] + }, + "agentManagementProxy": { + "type": "object", + "properties": { + "httpProxy": { + "type": "string", + "description": 
"http proxy for agent management" + }, + "httpsProxy": { + "type": "string", + "description": "https proxy for agent management" + }, + "noProxy": { + "type": "array", + "description": "hostnames to skip proxying the request for agent management", + "items": { + "type": "string" + } + } + } + }, + "tls": { + "type": "object", + "properties": { + "appdCollectors": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "description": "enable TLS for communication between Cisco AppDynamics Collectors and the Cisco AppDynamics Distribution of OpenTelemetry Collector (service).", + "type": "boolean" + }, + "secret": { + "type": "object", + "additionalProperties": false, + "properties": { + "secretName": { + "description": "name of the Kubernetes secret that holds the certificates", + "type": "string" + }, + "secretKeys": { + "description": "secret keys for specifying TLS certificate, key, and CA certificate", + "type": "object", + "additionalProperties": false, + "properties": { + "caCert": { + "description": "kubernetes secret key name that points to the CA certificate", + "type": "string" + }, + "tlsCert": { + "description": "kubernetes secret key name that points to the TLS certificate", + "type": "string" + }, + "tlsKey": { + "description": "kubernetes secret key name that points to the TLS key", + "type": "string" + } + } + } + } + } + } + } + } + } + } + }, + "install": { + "description": "contain flags to control installation of cluster collector, infra collector and log collector", + "type": "object", + "additionalProperties": false, + "properties": { + "clustermon": { + "type": "boolean" + }, + "defaultInfraCollectors": { + "type": "boolean" + }, + "logCollector": { + "type": "boolean" + } + }, + "required": [ + "clustermon", + "defaultInfraCollectors", + "logCollector" + ] + }, + "agentManagementEnabled": { + "description": "opt in and out from Agent Management for cluster collector, infra collector and log collector", + 
"type": "object", + "additionalProperties": false, + "properties": { + "clustermon": { + "type": "boolean" + }, + "defaultInfraCollectors": { + "type": "boolean" + }, + "logCollector": { + "type": "boolean" + } + } + }, + "clustermonServiceAccount": { + "description": "serviceAccount name for clustermon", + "type": "string" + }, + "inframonServiceAccount": { + "description": "serviceAccount name for inframon", + "type": "string" + }, + "logCollectorServiceAccount": { + "description": "serviceAccount name for log collector", + "type": "string" + }, + "clustermonConfig": { + "description": "configuration for clustermon", + "type": "object", + "additionalProperties": false, + "properties": { + "logLevel": { + "description": "log level for cluster collector", + "type": "string" + }, + "logFilesMaxSizeMb": { + "description": "maximum size of one log file (in MB)", + "type": "integer" + }, + "logFilesNumBackups": { + "description": "maximum number of log files created before overwriting the oldest one", + "type": "integer" + }, + "os": { + "description": "operating system of the nodes on which cluster collector will be installed", + "type": "string", + "enum": ["linux", "windows"] + }, + "printToStdout": { + "description": "Whether Kubernetes logs will report logs.", + "type": "string" + }, + "filters": { + "description": "filters based on namespace, entity and label for monitoring K8s objects", + "type": "object", + "additionalProperties": false, + "properties": { + "namespace": { + "type": "object", + "additionalProperties": false, + "properties": { + "includeRegex": { + "type": "string" + }, + "excludeRegex": { + "type": "string" + } + } + }, + "entity": { + "type": "object", + "additionalProperties": false, + "properties": { + "excludeRegex": { + "type": "string" + }, + "excludeLabels": { + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "label": { + "type": "object", + "additionalProperties": false, + "properties": { + "excludeRegex": { + "type": 
"string" + } + } + }, + "annotation": { + "type": "object", + "additionalProperties": false, + "properties": { + "excludeRegex": { + "type": "string" + } + } + }, + "scopedFilters": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "scope": { + "type": "object", + "additionalProperties": false, + "properties": { + "namespaceRegex": { + "type": "string" + }, + "entityTypes": { + "type": "array", + "items": { + "type": "string", + "enum": ["configmap", "cronjob", "daemonset", "deployment", "horizontalpodautoscaler", "job", "replicaset", "replicationcontroller", "secret", "statefulset"] + } + } + } + }, + "entityFilter": { + "type": "object", + "additionalProperties": false, + "properties": { + "excludeRegex": { + "type": "string" + }, + "excludeLabels": { + "type": "array", + "items": { + "type": "object" + } + } + } + } + } + } + } + } + }, + "ingressControllers": { + "description": "ingressControllers property", + "type": "object" + }, + "events": { + "description": "configuration for collecting events", + "type": "object" + }, + "configurations": { + "description": "enable or disable monitoring Configurations (ConfigMap, Secret)", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "maxGoRoutine": { + "description": "maximum number of go routines used", + "type": "integer", + "minimum": 1, + "maximum": 10 + }, + "helmChartVersion": { + "description": "version of the helm chart used", + "type": "string" + }, + "labels": { + "description": "allows you to enable or disable labels collection", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "autoscalers": { + "description": "allows you to enable or disable monitoring autoscalers (hpa)", + "type": "object", + "additionalProperties": false, + "properties": { + "hpaVpaEnabled": { + "type": "boolean" + } + } + }, + "profiler": 
{ + "description": "allow you to enable or disable profiler", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "type": "integer" + } + } + } + } + }, + "infraManagerConfig": { + "description": "configuration for infra manager", + "type": "object", + "additionalProperties": false, + "properties": { + "logLevel": { + "description": "log level for infra manager", + "type": "string" + }, + "logFilesMaxSizeMb": { + "description": "maximum size of one log file for infra manager", + "type": "number" + }, + "logFilesNumBackups": { + "description": "maximum number of log files created for infra manager before overwriting the oldest one", + "type": "number" + }, + "printToStdout": { + "description": "determines whether infra manager logs report through Kubernetes logs", + "type": "string" + } + } + }, + "servermonConfig": { + "description": "configuration for server collector", + "type": "object", + "additionalProperties": false, + "properties": { + "os": { + "description": "operating system of the nodes on which server collector will be installed", + "type": "array", + "items": { + "type": "string", + "enum": ["linux", "windows"] + } + }, + "logLevel": { + "description": "log level for server collector", + "type": "string" + }, + "logFilesMaxSizeMb": { + "description": "maximum size of one log file for server collector", + "type": "number" + }, + "logFilesNumBackups": { + "description": "maximum number of log files created for server collector before overwriting the oldest one", + "type": "number" + }, + "exporterPort": { + "description": "port used to start Node Exporter in Linux", + "type": "number" + }, + "gatherInterval": { + "description": "gather interval for server collector", + "type": "number", + "enum": [10, 20, 30, 60] + }, + "skipProviderCheck": { + "description": "skip the cloud provider check in server collector", + "type": "boolean" + }, + "env": { + "description": "override the 
server collector configuration for a specific operating system", + "type": "object" + } + }, + "required": [ + "os" + ] + }, + "containermonConfig": { + "description": "configuration for container collector", + "type": "object", + "additionalProperties": false, + "properties": { + "os": { + "description": "operating system of the nodes on which container collector will be installed", + "type": "array", + "items": { + "type": "string", + "enum": ["linux", "windows"] + } + }, + "logLevel": { + "description": "log level for container collector", + "type": "string" + }, + "logFilesMaxSizeMb": { + "description": "maximum size of one log file for container collector", + "type": "number" + }, + "logFilesNumBackups": { + "description": "maximum number of log files created for container collector before overwriting the oldest one", + "type": "number" + }, + "gatherInterval": { + "description": "gather interval for container collector", + "type": "number", + "enum": [10, 20, 30, 60] + }, + "env": { + "description": "override the container collector configuration for a specific operating system", + "type": "object" + } + }, + "required": [ + "os" + ] + }, + "logCollectorConfig": { + "description": "configuration for log collector", + "type": "object", + "additionalProperties": false, + "properties": { + "os": { + "description": "OS on which Log Collector should be deployed, accepted values are linux and windows", + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "description": "Specifies OS-specific overrides.", + "type": "object", + "additionalProperties": false, + "properties": { + "linux": { + "type": "object", + "additionalProperties": false, + "properties": { + "container": { + "$ref": "#/definitions/logCollectorConfigContainerProp" + }, + "filebeatYaml": { + "description": "Advanced config for LCA", + "type": "string" + } + } + }, + "windows": { + "type": "object", + "additionalProperties": false, + "properties": { + "container": { + "$ref": 
"#/definitions/logCollectorConfigContainerProp" + }, + "filebeatYaml": { + "description": "Advanced config for LCA", + "type": "string" + } + } + } + } + }, + "container": { + "$ref": "#/definitions/logCollectorConfigContainerProp" + }, + "filebeatYaml": { + "description": "Advanced config for LCA", + "type": "string" + } + } + }, + "clustermonPod": { + "description": "configuration for clustermon pod", + "type": "object", + "additionalProperties": false, + "properties": { + "image": { + "description": "image URL of cluster collector", + "type": "string" + }, + "imagePullPolicy": { + "description": "image pull policy for cluster collector image", + "type": "string" + }, + "affinity": { + "description": "affinity for the cluster collector pod", + "type": "object" + }, + "annotations": { + "description": "annotations for the cluster collector pod.", + "type": "object" + }, + "imagePullSecrets": { + "description": "image pull secrets for cluster collector image", + "type": "array", + "items": { + "type": "object" + } + }, + "labels": { + "description": "labels for the cluster collector pod", + "type": "object" + }, + "nodeSelector": { + "description": "node selector for the cluster collector pod", + "type": "object" + }, + "priorityClassName": { + "description": "name of the pod priority class, which is used in the cluster collector pod specification to set the priority.", + "type": "string" + }, + "resources": { + "type": "object", + "description": "resources for the cluster collector pod", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "securityContext": { + "description": "security context for the cluster collector pod.", + "type": "object" + }, + "tolerations": { + "description": "tolerations for the cluster collector 
pod.", + "type": "array", + "items": { + "type": "object" + } + } + }, + "required": [ + "image" + ] + }, + "inframonPod": { + "description": "configuration for inframon pod", + "type": "object", + "additionalProperties": false, + "properties": { + "image": { + "description": "image URL of infra collector", + "type": "string" + }, + "imagePullPolicy": { + "description": "image pull policy for infra collector image", + "type": "string" + }, + "imagePullSecrets": { + "description": "image pull secrets for infra collector image", + "type": "array", + "items": { + "type": "object" + } + }, + "priorityClassName": { + "description": "name of the pod priority class, which is used in the infra collector pod specification to set the priority", + "type": "string" + }, + "resources": { + "description": "resources for the infra collector pod", + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "labels": { + "description": "labels for the infra collector pod", + "type": "object" + }, + "annotations": { + "description": "annotations for the infra collector pod", + "type": "object" + }, + "nodeSelector": { + "description": "node selector for the infra collector pod", + "type": "object" + }, + "affinity": { + "description": "affinity for the infra collector pod", + "type": "object" + }, + "tolerations": { + "description": "tolerations for the infra collector pod", + "type": "array", + "items": { + "type": "object" + } + }, + "securityContext": { + "description": "security context for the infra collector pod", + "type": "object" + }, + "env": { + "description": "override the infra collector pod configuration for a specific operating system", + "type": "object" + } + }, + "required": [ + "image" + ] + }, + 
"windowsExporterPod": { + "description": "configuration for windowsExporter pod", + "type": "object", + "additionalProperties": false, + "properties": { + "image": { + "description": "image URL of windows exporter", + "type": "string" + }, + "imagePullPolicy": { + "description": "image pull policy for windows exporter image", + "type": "string" + }, + "imagePullSecrets": { + "description": "image pull secrets for windows exporter image", + "type": "array", + "items": { + "type": "object" + } + }, + "priorityClassName": { + "description": "name of the pod priority class, which is used in the windows exporter pod specification to set the priority", + "type": "string" + }, + "resources": { + "description": "resources for the windows exporter pod", + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "labels": { + "description": "labels for the windows exporter pod", + "type": "object" + }, + "annotations": { + "description": "annotations for the windows exporter pod", + "type": "object" + }, + "nodeSelector": { + "description": "node selector for the windows exporter pod", + "type": "object" + }, + "affinity": { + "description": "affinity for the windows exporter pod", + "type": "object" + }, + "tolerations": { + "description": "tolerations for the windows exporter pod", + "type": "array", + "items": { + "type": "object" + } + }, + "securityContext": { + "description": "security context for the windows exporter pod", + "type": "object" + } + }, + "required": [ + "image" + ] + }, + "logCollectorPod": { + "description": "configuration for log collector pod", + "type": "object", + "additionalProperties": false, + "properties": { + "image": { + "description": "image URL of log collector", + "type": "string" + }, + 
"imagePullPolicy": { + "description": "image pull policy for log collector image", + "type": "string" + }, + "imagePullSecrets": { + "description": "image pull secrets for log collector image", + "type": "array", + "items": { + "type": "object" + } + }, + "resources": { + "description": "resources for the log collector pod", + "type": "object", + "additionalProperties": false, + "properties": { + "limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "labels": { + "description": "labels for the log collector pod", + "type": "object" + }, + "annotations": { + "description": "annotations for the log collector pod", + "type": "object" + }, + "nodeSelector": { + "description": "node selector for the log collector pod", + "type": "object" + }, + "affinity": { + "description": "affinity for the log collector pod", + "type": "object" + }, + "tolerations": { + "description": "tolerations for the log collector pod", + "type": "array", + "items": { + "type": "object" + } + }, + "securityContext": { + "description": "security context for the log collector pod", + "type": "object" + }, + "env": { + "description": "override the log collector pod configuration for a specific operating system", + "type": "object" + }, + "rollingUpdateMaxUnavailable": { + "description": "override the number of log collector pods that can be unavailable during the update process", + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "image" + ] + } + }, + "definitions": { + "logCollectorSimpleConditionProp": { + "description": "Log Collector's simple condition format (operator, key, value)", + "type": "object", + "additionalProperties": false, + "properties": { + "key": { + "type": "string" + }, + 
"operator": { + "enum": ["equals", "contains", "regexp", "range", "network"] + }, + "value": { + "type": "string" + } + } + }, + "logCollectorFilebeatConditionProp": { + "description": "Log Collector's condition format (follows Filebeat's autodiscover condition format)", + "type": "object", + "additionalProperties": false, + "properties": { + "equals": { + "type": "object" + }, + "contains": { + "type": "object" + }, + "regexp": { + "type": "object" + }, + "range": { + "type": "object" + }, + "network": { + "type": "object" + }, + "has_fields": { + "type": "array", + "items": { + "type": "string" + } + }, + "or": { + "type": "array", + "items": { + "$ref": "#/definitions/logCollectorFilebeatConditionProp" + } + }, + "and": { + "type": "array", + "items": { + "$ref": "#/definitions/logCollectorFilebeatConditionProp" + } + }, + "not": { + "$ref": "#/definitions/logCollectorFilebeatConditionProp" + } + } + }, + "logCollectorConfigContainerProp": { + "description": "Log collector config for log collection from containers", + "type": "object", + "additionalProperties": false, + "properties": { + "monitorCollectors": { + "description": "Enables or disables log collection from the Log Collector and other collectors running on your cluster", + "type": [ + "boolean", + "string" + ] + }, + "defaultConfig": { + "description": "Default condition for harvesting logs from any container on your cluster", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "description": "Enable default log collection", + "type": [ + "boolean", + "string" + ] + }, + "multiLinePattern": { + "type": "string" + }, + "multiLineMatch": { + "type": "string" + }, + "multiLineNegate": { + "type": [ + "boolean", + "string" + ] + }, + "logFormat": { + "type": "string" + }, + "messageParser": { + "description": "Single-line message pattern for messages. 
Include only one log type in each condition block, and delete all others", + "type": "object", + "additionalProperties": false, + "properties": { + "timestamp": { + "description": "Single-line message pattern for Timestamp parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "format": { + "type": "string" + } + } + }, + "logback": { + "description": "Single-line message pattern for logback parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "pattern": { + "type": "string" + } + } + }, + "json": { + "description": "Single-line message pattern for json parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "timestampField": { + "type": "string" + }, + "timestampPattern": { + "type": "string" + } + } + }, + "log4J": { + "description": "Single-line message pattern for log4J parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "pattern": { + "type": "string" + } + } + }, + "grok": { + "description": "Single-line message pattern for grok parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "patterns": { + "type": "array", + "items": { + "type": "string" + } + }, + "timestampField": { + "type": "string" + }, + "timestampPattern": { + "type": "string" + } + } + }, + "infra": { + "description": "Single-line message pattern for Kubernetes infrastructure log messages", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + } + } + }, + "multi": { + "description": "Applies multiple parsers to a single log message", + "type": "object", + "additionalProperties": false, + 
"properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "parsers": { + "type": "string" + } + } + }, + "subparsers": { + "description": "Applies subparsers to each Grok log message", + "type": "string" + } + } + } + } + }, + "conditionalConfigs": { + "description": "The block which contains all the settings for a specific log source, type, and pattern as a pair of condition+config blocks. There can be multiple condition+config pairs within conditionalConfigs", + "type": "array", + "items": { + "type": "object", + "properties": { + "condition": { + "oneOf": [ + { + "$ref": "#/definitions/logCollectorSimpleConditionProp" + }, + { + "$ref": "#/definitions/logCollectorFilebeatConditionProp" + } + ] + }, + "config": { + "type": "object", + "additionalProperties": false, + "properties": { + "logFormat": { + "type": "string" + }, + "multiLinePattern": { + "type": "string" + }, + "multiLineNegate": { + "type": [ + "boolean", + "string" + ] + }, + "multiLineMatch": { + "type": "string" + }, + "messageParser": { + "description": "Single-line message pattern for messages. 
Include only one log type in each condition block, and delete all others", + "type": "object", + "additionalProperties": false, + "properties": { + "timestamp": { + "description": "Single-line message pattern for Timestamp parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "format": { + "type": "string" + } + } + }, + "logback": { + "description": "Single-line message pattern for logback parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "pattern": { + "type": "string" + } + } + }, + "json": { + "description": "Single-line message pattern for json parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "timestampField": { + "type": "string" + }, + "timestampPattern": { + "type": "string" + } + } + }, + "log4J": { + "description": "Single-line message pattern for log4J parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "pattern": { + "type": "string" + } + } + }, + "grok": { + "description": "Single-line message pattern for grok parser", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "patterns": { + "type": "array", + "items": { + "type": "string" + } + }, + "timestampField": { + "type": "string" + }, + "timestampPattern": { + "type": "string" + } + } + }, + "infra": { + "description": "Single-line message pattern for Kubernetes infrastructure log messages", + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + } + } + }, + "multi": { + "description": "Applies multiple parsers to a single log message", + "type": "object", + "additionalProperties": false, + 
"properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "parsers": { + "type": "string" + } + } + }, + "subparsers": { + "description": "Applies subparsers to each Grok log message", + "type": "string" + } + } + } + } + } + } + } + }, + "excludeCondition": { + "$ref": "#/definitions/logCollectorFilebeatConditionProp" + }, + "dropFields": { + "type": "array", + "items": { + "type": "string" + } + }, + "logging": { + "type": "object", + "additionalProperties": false, + "properties": { + "level": { + "type": "string" + }, + "selectors": { + "type": "array", + "items": { + "type": "string" + } + }, + "files": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "description": "Enable logging to files", + "type": [ + "boolean", + "string" + ] + }, + "keepFiles": { + "type": "integer" + } + } + }, + "metrics": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "description": "Enable metrics logging", + "type": [ + "boolean", + "string" + ] + }, + "period": { + "type": "string" + } + } + } + } + }, + "batchSize": { + "type": "integer" + }, + "maxBytes": { + "type": "integer" + }, + "summaryDebugLogsInterval": { + "description": "Logging interval. Example: 10s.", + "type": "string" + }, + "worker": { + "description": "The number of worker threads to an output host. Default: 1. 
", + "type": [ + "string", + "integer" + ] + }, + "monitoring": { + "type": "object", + "additionalProperties": false, + "properties": { + "otlpmetric": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "endpoint": { + "type": "string" + }, + "protocol": { + "enum": ["grpc", "http"] + }, + "collectPeriod": { + "type": "string" + }, + "reportPeriod": { + "type": "string" + }, + "resourceAttrs": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + } + }, + "metrics": { + "type": "array", + "items": { + "type": "string" + } + }, + "retry": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "initialInterval": { + "type": "string" + }, + "maxInterval": { + "type": "string" + }, + "maxElapsedTime": { + "type": "string" + } + }, + "ssl": { + "enabled": { + "type": [ + "boolean", + "string" + ] + }, + "certificateAuthorities": { + "type": "array", + "items": { + "type": "string" + } + }, + "certificate": { + "type": "string" + }, + "key": { + "type": "string" + } + } + } + } + } + } + } + } + } +} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/values.yaml new file mode 100644 index 00000000..7e563dee --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-cloud-k8s-monitoring/values.yaml @@ -0,0 +1,254 @@ +global: + smartAgentInstall: false + namespace: "" + clusterName: "" + tls: + appdCollectors: + enabled: false + secret: {} + oauth: + clientId: "" + # Either of clientSecret or clientSecretEnvVar will be present + clientSecret: "" + # clientSecretEnvVar: + # valueFrom: + # secretKeyRef: + # name: "" + 
# key: "" + endpoint: "" + tokenUrl: "" + agentManagementProxy: + httpProxy: "" + httpsProxy: "" + noProxy: [] + +install: + clustermon: true + defaultInfraCollectors: true + logCollector: false + +agentManagementEnabled: + clustermon: true + defaultInfraCollectors: true + logCollector: true + +# RBAC config +clustermonServiceAccount: appdynamics-clustermon +inframonServiceAccount: appdynamics-inframon +logCollectorServiceAccount: appdynamics-log-collector + +# Clustermon Configs +clustermonConfig: + os: linux + logLevel: info + logFilesMaxSizeMb: 10 + logFilesNumBackups: 4 + printToStdout: "true" + + filters: + namespace: + includeRegex: ".*" + excludeRegex: "" + entity: + excludeRegex: "" + excludeLabels: [] + label: + excludeRegex: "" + annotation: + excludeRegex: "" + scopedFilters: [] + ingressControllers: {} + + labels: + enabled: true + + events: + enabled: true + severityToExclude: [] + reasonToExclude: [] + severeGroupByReason: [] + + configurations: + enabled: true + autoscalers: + hpaVpaEnabled: true + +# Infra Manager Configs +infraManagerConfig: + logLevel: info + logFilesMaxSizeMb: 10 + logFilesNumBackups: 4 + printToStdout: "true" + +# Servermon Configs +servermonConfig: + os: [linux] + logLevel: info + logFilesMaxSizeMb: 10 + logFilesNumBackups: 4 + +# Containermon Configs +containermonConfig: + os: [linux] + logLevel: info + logFilesMaxSizeMb: 10 + logFilesNumBackups: 4 + +# LogCollector Configs +logCollectorConfig: + os: [linux] + container: + monitorCollectors: false + defaultConfig: + enabled: true + conditionalConfigs: [] + dropFields: [] + batchSize: 1000 + worker: 1 + maxBytes: 1000000 + summaryDebugLogsInterval: 10s + logging: + level: info + selectors: [] + files: + enabled: false + keepFiles: 5 + metrics: + enabled: false + period: 30s + monitoring: + otlpmetric: + enabled: false + endpoint: "${APPD_OTELCOL_GRPC_RECEIVER_HOST}:${APPD_OTELCOL_GRPC_RECEIVER_PORT}" + protocol: grpc + resourceAttrs: [] + # default metrics to capture + metrics: 
+ - beat.memstats.memory_alloc + - filebeat.events.active + - libbeat.output.read.errors + - libbeat.output.write.bytes + - libbeat.output.write.errors + - system.load.norm.5 + - system.load.norm.15 + - filebeat.input.filestream.harvester.running + - filebeat.input.filestream.harvester.stopped + - filebeat.input.filestream.files.open + - filebeat.input.filestream.events.eof + - filebeat.input.filestream.events.write + - filebeat.input.filestream.events.create + - filebeat.input.filestream.events.rename + - filebeat.input.filestream.events.delete + - filebeat.input.filestream.events.truncate + retry: + enabled: false + ssl: + enabled: false + certificateAuthorities: [] + filebeatYaml: "" + env: + linux: + filebeatYaml: "" + windows: + filebeatYaml: "" + +# Deployment specific configs +clustermonPod: + image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + imagePullPolicy: Always + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 500m + memory: 750Mi + labels: {} + annotations: {} + nodeSelector: {} + imagePullSecrets: [] + affinity: {} + tolerations: [] + securityContext: {} + +# Daemonset specific configs +inframonPod: + image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + imagePullPolicy: Always + resources: + limits: + cpu: 350m + memory: 100Mi + requests: + cpu: 200m + memory: 64Mi + labels: {} + annotations: {} + nodeSelector: {} + imagePullSecrets: [] + affinity: {} + tolerations: [] + securityContext: {} + env: + linux: + nodeSelector: + kubernetes.io/os: linux + windows: + resources: + limits: + cpu: 350m + memory: 300Mi + requests: + cpu: 200m + memory: 150Mi + nodeSelector: + kubernetes.io/os: windows + +windowsExporterPod: + image: ghcr.io/prometheus-community/windows-exporter:0.24.0 + imagePullPolicy: Always + resources: + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + labels: {} + annotations: {} + nodeSelector: {} + imagePullSecrets: [] + affinity: {} + tolerations: [] + 
securityContext: {} + +# Daemonset specific configs +logCollectorPod: + image: appdynamics/appdynamics-cloud-log-collector-agent:24.4.0-1163 + imagePullPolicy: Always + resources: + limits: + cpu: 400m + memory: 512Mi + requests: + cpu: 10m + memory: 150Mi + labels: {} + annotations: {} + nodeSelector: {} + imagePullSecrets: [] + affinity: {} + tolerations: [] + securityContext: {} + # MaxUnavailable replicas for Daemonset rolling update. This can be both absolute value(int) or in percentage. + rollingUpdateMaxUnavailable: 100% + env: + linux: + nodeSelector: + kubernetes.io/os: linux + windows: + nodeSelector: + kubernetes.io/os: windows + + + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/.helmignore new file mode 100644 index 00000000..88b6008a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +# CA bundle asc files +*.asc diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/Chart.lock b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/Chart.lock new file mode 100644 index 00000000..6e0f5f13 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: metrics-server + repository: https://kubernetes-sigs.github.io/metrics-server/ + version: 3.7.0 +digest: sha256:284524138c4288858ee1e1a748625f9de5f8e374b99a370c2a64671b260325e1 +generated: "2024-07-29T04:41:11.677638212Z" diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/Chart.yaml new file mode 100644 index 00000000..76317229 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +appVersion: 24.1.0 +dependencies: +- condition: install.metrics-server + name: metrics-server + repository: https://kubernetes-sigs.github.io/metrics-server/ + version: 3.7.0 +description: Sophisticated helm chart to deploy cluster agent and machine agent in + addition with advanced features like multiple cluster agent deployments, Granular + control over config with intuitive boolean switches +home: https://appdynamics.com +icon: https://raw.githubusercontent.com/CiscoDevNet/appdynamics-charts/master/logo.png +keywords: +- appdynamics +- cluster +- kubernetes +- 
openshift +- monitoring +- pod +- deployment +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-csaas-k8s-cluster-agent +version: 1.19.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/README.md b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/README.md new file mode 100644 index 00000000..da3d424d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/README.md @@ -0,0 +1,37 @@ +# Appdynamics Helm Chart + +### Add AppDynamics helm repo + +### Create values yaml to override default ones +```yaml +installClusterAgent: true +installInfraViz: false + +imageInfo: + agentImage: docker.io/appdynamics/cluster-agent + agentTag: 22.1.0 # Will be used for operator pod + machineAgentImage: docker.io/appdynamics/machine-agent + machineAgentTag: latest + machineAgentWinImage: docker.io/appdynamics/machine-agent-analytics + machineAgentWinTag: win-latest + netVizImage: docker.io/appdynamics/machine-agent-netviz + netvizTag: latest + +controllerInfo: + url: + account: + username: + password: + accessKey: + globalAccount: # To be provided when using machineAgent Window Image + +agentServiceAccount: appdynamics-cluster-agent +infravizServiceAccount: appdynamics-infraviz +``` +### Install cluster agent or machine agent using helm chart +```bash +helm install cluster-agent appdynamics-charts/cluster-agent -f .yaml --namespace appdynamics +``` + +### Note: +cluster agent installation is independent of otel collector. 
\ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/Chart.yaml new file mode 100644 index 00000000..0413ef0a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + artifacthub.io/changes: | + - kind: changed + description: "Update Metrics Server image to v0.5.2." +apiVersion: v2 +appVersion: 0.5.2 +description: Metrics Server is a scalable, efficient source of container resource + metrics for Kubernetes built-in autoscaling pipelines. 
+home: https://github.com/kubernetes-sigs/metrics-server +icon: https://avatars.githubusercontent.com/u/36015203?s=400&v=4 +keywords: +- kubernetes +- metrics-server +- metrics +maintainers: +- name: stevehipwell + url: https://github.com/stevehipwell +- name: krmichel + url: https://github.com/krmichel +- name: endrec + url: https://github.com/endrec +name: metrics-server +sources: +- https://github.com/kubernetes-sigs/metrics-server +type: application +version: 3.7.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/README.md b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/README.md new file mode 100644 index 00000000..51817069 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/README.md @@ -0,0 +1,64 @@ +# Kubernetes Metrics Server + +[Metrics Server](https://github.com/kubernetes-sigs/metrics-server/) is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines. + + + +## Installing the Chart + +Before you can install the chart you will need to add the `metrics-server` repo to [Helm](https://helm.sh/). + +```shell +helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/ +``` + +After you've installed the repo you can install the chart. + +```shell +helm upgrade --install metrics-server metrics-server/metrics-server +``` + +## Configuration + +The following table lists the configurable parameters of the _Metrics Server_ chart and their default values. 
+ +| Parameter | Description | Default | +| ------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| `image.repository` | Image repository. | `k8s.gcr.io/metrics-server/metrics-server` | +| `image.tag` | Image tag, will override the default tag derived from the chart app version. | `""` | +| `image.pullPolicy` | Image pull policy. | `IfNotPresent` | +| `imagePullSecrets` | Image pull secrets. | `[]` | +| `nameOverride` | Override the `name` of the chart. | `nil` | +| `fullnameOverride` | Override the `fullname` of the chart. | `nil` | +| `serviceAccount.create` | If `true`, create a new service account. | `true` | +| `serviceAccount.annotations` | Annotations to add to the service account. | `{}` | +| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `nil` | +| `rbac.create` | If `true`, create the RBAC resources. | `true` | +| `rbac.pspEnabled` | If `true`, create a pod security policy resource. | `false` | +| `apiService.create` | If `true`, create the `v1beta1.metrics.k8s.io` API service. You typically want this enabled! If you disable API service creation you have to manage it outside of this chart for e.g horizontal pod autoscaling to work with this release. | `true` | +| `podLabels` | Labels to add to the pod. | `{}` | +| `podAnnotations` | Annotations to add to the pod. | `{}` | +| `podSecurityContext` | Security context for the pod. | `{}` | +| `securityContext` | Security context for the _metrics-server_ container. | _See values.yaml_ | +| `priorityClassName` | Priority class name to use. 
| `system-cluster-critical` | +| `containerPort` | port for the _metrics-server_ container. | `4443` | +| `hostNetwork.enabled` | If `true`, start _metric-server_ in hostNetwork mode. You would require this enabled if you use alternate overlay networking for pods and API server unable to communicate with metrics-server. As an example, this is required if you use Weave network on EKS. | `false` | +| `replicas` | Number of replicas to run. | `1` | +| `updateStrategy` | Customise the default update strategy. | `{}` | +| `podDisruptionBudget.enabled` | If `true`, create `PodDisruptionBudget` resource. | `{}` | +| `podDisruptionBudget.minAvailable` | Set the `PodDisruptionBugdet` minimum available pods. | `nil` | +| `podDisruptionBudget.maxUnavailable` | Set the `PodDisruptionBugdet` maximum unavailable pods. | `nil` | +| `defaultArgs` | Default arguments to pass to the _metrics-server_ command. | See _values.yaml_ | +| `args` | Additional arguments to pass to the _metrics-server_ command. | `[]` | +| `livenessProbe` | Liveness probe. | See _values.yaml_ | +| `readinessProbe` | Readiness probe. | See _values.yaml_ | +| `service.type` | Service type. | `ClusterIP` | +| `service.port` | Service port. | `443` | +| `service.annotations` | Annotations to add to the service. | `{}` | +| `service.labels` | Labels to add to the service. | `{}` | +| `resources` | Resource requests and limits for the _metrics-server_ container. | `{}` | +| `extraVolumeMounts` | Additional volume mounts for the _metrics-server_ container. | `[]` | +| `extraVolumes` | Additional volumes for the pod. | `[]` | +| `nodeSelector` | Node labels for pod assignment. | `{}` | +| `tolerations` | Tolerations for pod assignment. | `[]` | +| `affinity` | Affinity for pod assignment. 
| `{}` | diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/ci/ci-values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/ci/ci-values.yaml new file mode 100644 index 00000000..b9e9ef73 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/ci/ci-values.yaml @@ -0,0 +1,2 @@ +args: + - --kubelet-insecure-tls diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/NOTES.txt b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/NOTES.txt new file mode 100644 index 00000000..0ad6bb07 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/NOTES.txt @@ -0,0 +1,7 @@ +*********************************************************************** +* Metrics Server * +*********************************************************************** + Chart version: {{ .Chart.Version }} + App version: {{ .Chart.AppVersion }} + Image tag: {{ include "metrics-server.image" . 
}} +*********************************************************************** diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/_helpers.tpl new file mode 100644 index 00000000..f5581697 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "metrics-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "metrics-server.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "metrics-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "metrics-server.labels" -}} +helm.sh/chart: {{ include "metrics-server.chart" . }} +{{ include "metrics-server.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "metrics-server.selectorLabels" -}} +app.kubernetes.io/name: {{ include "metrics-server.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "metrics-server.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "metrics-server.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +The image to use +*/}} +{{- define "metrics-server.image" -}} +{{- printf "%s:%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} + +{{/* Get PodDisruptionBudget API Version */}} +{{- define "metrics-server.pdb.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/apiservice.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/apiservice.yaml new file mode 100644 index 00000000..dd37b5db --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/apiservice.yaml @@ -0,0 +1,17 @@ +{{- if .Values.apiService.create -}} +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + {{- include "metrics-server.labels" . 
| nindent 4 }} +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: {{ include "metrics-server.fullname" . }} + namespace: {{ .Release.Namespace }} + version: v1beta1 + versionPriority: 100 +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrole-aggregated-reader.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrole-aggregated-reader.yaml new file mode 100644 index 00000000..d5e8fe1b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrole-aggregated-reader.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ printf "system:%s-aggregated-reader" (include "metrics-server.name" .) }} + labels: + {{- include "metrics-server.labels" . 
| nindent 4 }} + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrole.yaml new file mode 100644 index 00000000..0636414f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrole.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ printf "system:%s" (include "metrics-server.fullname" .) }} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + - namespaces + - configmaps + verbs: + - get + - list + - watch + {{- if .Values.rbac.pspEnabled }} + - apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + resourceNames: + - {{ printf "privileged-%s" (include "metrics-server.fullname" .) 
}} + verbs: + - use + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrolebinding-auth-delegator.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrolebinding-auth-delegator.yaml new file mode 100644 index 00000000..826c3b7b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrolebinding-auth-delegator.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ printf "%s:system:auth-delegator" (include "metrics-server.fullname" .) }} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: {{ include "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..512cb651 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ printf "system:%s" (include "metrics-server.fullname" .) 
}} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:{{ template "metrics-server.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "metrics-server.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/deployment.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/deployment.yaml new file mode 100644 index 00000000..c8ca76b4 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/deployment.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "metrics-server.fullname" . }} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicas }} + {{- with .Values.updateStrategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "metrics-server.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "metrics-server.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "metrics-server.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . 
| quote }} + {{- end }} + {{- if .Values.hostNetwork.enabled }} + hostNetwork: true + {{- end }} + containers: + - name: metrics-server + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ include "metrics-server.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - {{ printf "--secure-port=%d" (int .Values.containerPort) }} + {{- range .Values.defaultArgs }} + - {{ . }} + {{- end }} + {{- range .Values.args }} + - {{ . }} + {{- end }} + ports: + - name: https + protocol: TCP + containerPort: {{ .Values.containerPort }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + - name: tmp + emptyDir: {} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/pdb.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/pdb.yaml new file mode 100644 index 00000000..1320b207 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/pdb.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podDisruptionBudget.enabled -}} +apiVersion: {{ include "metrics-server.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "metrics-server.fullname" . }} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "metrics-server.selectorLabels" . | nindent 6 }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/psp.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/psp.yaml new file mode 100644 index 00000000..bf8ace1a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/psp.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ printf "privileged-%s" (include "metrics-server.fullname" .) 
}} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +spec: + allowedCapabilities: + - '*' + fsGroup: + rule: RunAsAny + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - '*' + hostPID: true + hostIPC: true + hostNetwork: true + hostPorts: + - min: 1 + max: 65536 +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/rolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/rolebinding.yaml new file mode 100644 index 00000000..3fda7433 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ printf "%s-auth-reader" (include "metrics-server.fullname" .) }} + namespace: kube-system + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: {{ include "metrics-server.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/service.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/service.yaml new file mode 100644 index 00000000..7470218f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "metrics-server.fullname" . }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} + {{- with .Values.service.labels -}} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - name: https + port: {{ .Values.service.port }} + protocol: TCP + targetPort: https + selector: + {{- include "metrics-server.selectorLabels" . | nindent 4 }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/serviceaccount.yaml new file mode 100644 index 00000000..12f77245 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "metrics-server.serviceAccountName" . }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + {{- include "metrics-server.labels" . | nindent 4 }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/values.yaml new file mode 100644 index 00000000..0504399d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/charts/metrics-server/values.yaml @@ -0,0 +1,121 @@ +# Default values for metrics-server. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: k8s.gcr.io/metrics-server/metrics-server + # Overrides the image tag whose default is v{{ .Chart.AppVersion }} + tag: "" + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - registrySecretName + +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +rbac: + # Specifies whether RBAC resources should be created + create: true + pspEnabled: false + +apiService: + # Specifies if the v1beta1.metrics.k8s.io API service should be created. + # + # You typically want this enabled! If you disable API service creation you have to + # manage it outside of this chart for e.g horizontal pod autoscaling to + # work with this release. 
+ create: true + +podLabels: {} +podAnnotations: {} + +podSecurityContext: {} + +securityContext: + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + allowPrivilegeEscalation: false + +priorityClassName: system-cluster-critical + +containerPort: 4443 + +hostNetwork: + # Specifies if metrics-server should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. As an example, this is required + # if you use Weave network on EKS + enabled: false + +replicas: 1 + +updateStrategy: {} +# type: RollingUpdate +# rollingUpdate: +# maxSurge: 0 +# maxUnavailable: 1 + +podDisruptionBudget: + # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + enabled: false + minAvailable: + maxUnavailable: + +defaultArgs: + - --cert-dir=/tmp + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + +args: [] + +livenessProbe: + httpGet: + path: /livez + port: https + scheme: HTTPS + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + failureThreshold: 3 + +service: + type: ClusterIP + port: 443 + annotations: {} + labels: {} + # Add these labels to have metrics-server show up in `kubectl cluster-info` + # kubernetes.io/cluster-service: "true" + # kubernetes.io/name: "Metrics-server" + +resources: {} + +extraVolumeMounts: [] + +extraVolumes: [] + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/NOTES.txt b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/NOTES.txt new file mode 100644 index 00000000..5a0bd120 --- 
/dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/NOTES.txt @@ -0,0 +1,15 @@ +Check HELM release status: + + $ helm status {{ .Release.Name }} -n {{ .Release.Namespace }} + $ helm get {{ .Release.Name }} -n {{ .Release.Namespace }} + +List cluster agent and machine agent pods: + + $ kubectl get pods -n {{ .Release.Namespace }} + +Release state: + Install ClusterAgent: {{ .Values.installClusterAgent }} + Install InfraViz: {{ .Values.installInfraViz }} + Controller URL: {{ .Values.controllerInfo.url }} + Auto-Instrumentation enabled: {{ .Values.instrumentationConfig.enabled }} + Installing metrics-server: {{ get .Values.install "metrics-server" }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/_helper.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/_helper.tpl new file mode 100644 index 00000000..26fc504d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/_helper.tpl @@ -0,0 +1,5 @@ +{{- define "appdynamics-csaas-k8s-cluster-agent.sensitiveData" -}} +{{- if (get . "data") | required (get . "message") -}} +{{ (get . 
"data") | trim | b64enc }} +{{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/cluster-agent.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/cluster-agent.yaml new file mode 100644 index 00000000..3cc54563 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/cluster-agent.yaml @@ -0,0 +1,228 @@ +{{ if .Values.installClusterAgent -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: Clusteragent +metadata: + name: {{ (cat (regexReplaceAll "([^a-zA-Z0-9-.]+)" (lower (.Values.clusterAgent.appName | default (cat .Release.Name "-" "appdynamics-cluster-agent" | nospace))) "-") "-" .Release.Namespace | nospace) | trunc 63 }} + namespace: {{ .Release.Namespace }} +spec: + appName: {{ (cat (.Values.global.clusterName | default (cat .Release.Name "-" "appdynamics-cluster-agent" | nospace)) "-" .Release.Namespace | nospace) | trunc 63 }} + controllerUrl: {{ required "AppDynamics controller URL is required!" .Values.controllerInfo.url }} + account: {{ required "AppDynamics controller account is required!" .Values.controllerInfo.account }} + image: {{ .Values.imageInfo.agentImage }}:{{ .Values.imageInfo.agentTag }} + serviceAccountName: {{ .Values.agentServiceAccount }} + + {{ if .Values.controllerInfo.customSSLCert -}} + # Custom SSL config + customSSLSecret: cluster-agent-ssl-cert + {{- end }} + + {{ with .Values.controllerInfo -}} + {{ if .proxyUrl -}} + # Proxy config + proxyUrl: {{ .proxyUrl }} + {{ if .authenticateProxy -}} + proxyUser: {{ required "Proxy user is required to authenticate proxy!" 
.proxyUser }} + {{- end }} + {{- end }} + {{- end }} + + # Ad-Hoc properties + {{ with .Values.clusterAgent -}} + nsToMonitor: + {{- toYaml .nsToMonitor | nindent 4 }} + {{ if .nsToMonitorRegex -}} + nsToMonitorRegex: {{ .nsToMonitorRegex }} + {{- end }} + {{ if .nsToExcludeRegex -}} + nsToExcludeRegex: {{ .nsToExcludeRegex }} + {{- end }} + clusterMetricsSyncInterval: {{ .clusterMetricsSyncInterval }} + metadataSyncInterval: {{ .metadataSyncInterval }} + eventUploadInterval: {{ .eventUploadInterval }} + httpClientTimeout: {{ .httpClientTimeout }} + podBatchSize: {{ .podBatchSize }} + instrumentationMaxPollingAttempts: {{ .instrumentationMaxPollingAttempts }} + instrumentationNsStatusPollingIntervalMinutes: {{ .instrumentationNsStatusPollingIntervalMinutes }} + priorityClassName: {{ .priorityClassName }} + # Container specific properties + {{ with .containerProperties -}} + containerBatchSize: {{ .containerBatchSize }} + #containerParallelRequestLimit: {{ .containerParallelRequestLimit }} + containerRegistrationInterval: {{ .containerRegistrationInterval }} + {{- end }} + + # Metric specific properties + {{ with .metricProperties -}} + metricsSyncInterval: {{ .metricsSyncInterval }} + metricUploadRetryCount: {{ .metricUploadRetryCount }} + metricUploadRetryIntervalMilliSeconds: {{ .metricUploadRetryIntervalMilliSeconds }} + {{- end }} + + # Log specific properties + {{ with .logProperties -}} + logFileSizeMb: {{ .logFileSizeMb }} + logFileBackups: {{ .logFileBackups }} + logLevel: {{ .logLevel }} + {{- end }} + + {{ if .memoryThresholdForRestart -}} + # Cluster Agent memory threshold for restart + memoryThresholdForRestart: {{ .memoryThresholdForRestart }} + {{- end }} + + {{ if .enableClusterAgentRestartProbe -}} + # Cluster Agent graceful restart prob + enableClusterAgentRestartProbe: {{ .enableClusterAgentRestartProbe }} + {{- end }} + + {{ if .imagePullPolicy -}} + # Cluster Agent Image pull policy + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + + {{ 
if .imagePullSecret -}} + # Image pull secret + imagePullSecret: {{ .imagePullSecret }} + {{- end }} + {{- end }} + + # profiling specific properties + {{ with .Values.agentProfiler -}} + pprofEnabled: {{ .pprofEnabled }} + pprofPort: {{ .pprofPort }} + {{- end }} + # Pod filter properties + podFilter: + {{- toYaml .Values.podFilter | nindent 4 }} + + # Node selector + nodeSelector: + {{- toYaml .Values.agentPod.nodeSelector | nindent 4 }} + + # Tolerations + tolerations: + {{- toYaml .Values.agentPod.tolerations | nindent 4 }} + + labels: + {{- toYaml .Values.agentPod.labels | nindent 4 }} + + # Resources + resources: + {{- toYaml .Values.agentPod.resources | nindent 4 }} + + {{ with .Values.instrumentationConfig -}} + {{ if .enabled -}} + # Instrumentation config + {{ if .defaultAppName -}} + defaultAppName: {{ .defaultAppName }} + {{- end }} + {{ if .defaultEnv -}} + defaultEnv: {{ .defaultEnv }} + {{- end }} + {{ if .defaultInstrumentationLabelMatch -}} + defaultInstrumentationLabelMatch: + {{- toYaml .defaultInstrumentationLabelMatch | nindent 4 }} + {{- end }} + {{ if .defaultInstrumentMatchString -}} + defaultInstrumentMatchString: {{ .defaultInstrumentMatchString }} + {{- end }} + {{ if .defaultCustomConfig -}} + defaultCustomConfig: {{ .defaultCustomConfig }} + {{- end }} + {{ if .appNameStrategy -}} + appNameStrategy: {{ .appNameStrategy }} + {{- end }} + {{ if .tierNameStrategy -}} + tierNameStrategy: {{ .tierNameStrategy }} + {{- end }} + {{ if .appNameLabel -}} + appNameLabel: {{ .appNameLabel }} + {{- end }} + {{ if .tierNameLabel -}} + tierNameLabel: {{ .tierNameLabel }} + {{- end }} + {{ if .nodeName -}} + nodeName: {{ .nodeName }} + {{- end }} + {{ if .imageInfo -}} + imageInfo: + {{- toYaml .imageInfo | nindent 4}} + {{- end }} + {{ if .instrumentationMethod -}} + instrumentationMethod: {{ .instrumentationMethod }} + {{- end }} + {{ if .resourcesToInstrument -}} + resourcesToInstrument: + {{- toYaml .resourcesToInstrument | nindent 4 }} + {{- end 
}} + {{ if .instrumentationRules -}} + instrumentationRules: + {{- toYaml .instrumentationRules | nindent 4 }} + {{- end }} + {{ if .nsToInstrumentRegex -}} + nsToInstrumentRegex: {{ .nsToInstrumentRegex }} + {{- end }} + {{ if .netvizInfo -}} + netvizInfo: + {{- toYaml .netvizInfo | nindent 4 }} + {{- end }} + {{ if .runAsUser -}} + runAsUser: {{ .runAsUser }} + {{- end }} + {{ if .runAsGroup -}} + runAsGroup: {{ .runAsGroup }} + {{- end }} + {{ if .runAsNonRoot -}} + runAsNonRoot: {{ .runAsNonRoot }} + {{- end }} + {{ if .readOnlyRootFilesystem -}} + readOnlyRootFilesystem: {{ .readOnlyRootFilesystem }} + {{- end }} + {{ if .allowPrivilegeEscalation -}} + allowPrivilegeEscalation: {{ .allowPrivilegeEscalation }} + {{- end }} + {{ if .capabilities -}} + capabilities: {{ .capabilities }} + {{- end }} + {{ if .seccompProfile -}} + seccompProfile: {{ .seccompProfile }} + {{- end }} + {{ if .windowsOptions -}} + windowsOptions: {{ .windowsOptions }} + {{- end }} + {{ if .seLinuxOptions -}} + seLinuxOptions: {{ .seLinuxOptions }} + {{- end }} + {{ if .procMount -}} + procMount: {{ .procMount }} + {{- end }} + {{ if .privileged -}} + privileged: {{ .privileged }} + {{- end }} + {{ if .numberOfTaskWorkers -}} + numberOfTaskWorkers: {{ .numberOfTaskWorkers }} + {{- end }} + {{ if .defaultAnalyticsHost -}} + defaultAnalyticsHost: {{ .defaultAnalyticsHost }} + {{- end }} + {{ if .defaultAnalyticsPort -}} + defaultAnalyticsPort: {{ .defaultAnalyticsPort }} + {{- end }} + {{ if .defaultAnalyticsSslEnabled -}} + defaultAnalyticsSslEnabled: {{ .defaultAnalyticsSslEnabled }} + {{- end }} + {{ if .enableInstallationReport -}} + enableInstallationReport: {{ .enableInstallationReport }} + {{ end -}} + {{ if .enableForceReInstrumentation -}} + enableForceReInstrumentation: {{ .enableForceReInstrumentation }} + {{ end -}} + {{if .containerAppCorrelationMethod -}} + containerAppCorrelationMethod: {{ .containerAppCorrelationMethod }} + {{ end -}} + {{if .metadataServerPort -}} + 
metadataServerPort: {{ .metadataServerPort }} + {{ end -}} + {{ end -}} + {{ end -}} +{{ end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/infraviz.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/infraviz.yaml new file mode 100644 index 00000000..392d3c2c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/infraviz.yaml @@ -0,0 +1,120 @@ +{{ if .Values.installInfraViz -}} +apiVersion: cluster.appdynamics.com/v1alpha1 +kind: InfraViz +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-infraviz" | trunc 63 }} + namespace: {{ .Release.Namespace }} +spec: + controllerUrl: {{ required "AppDynamics controller URL is required!" .Values.controllerInfo.url }} + account: {{ required "AppDynamics controller account is required!" .Values.controllerInfo.account }} + + {{ if .Values.controllerInfo.keyStoreFileSecret -}} + # Custom SSL config + keyStoreFileSecret: {{ .Values.controllerInfo.keyStoreFileSecret }} + keystorePasswordSecret: {{ .Values.controllerInfo.keystorePasswordSecret }} + {{- end }} + + {{- if eq .Values.infraViz.nodeOS "linux" }} + image: {{ .Values.imageInfo.machineAgentImage }}:{{ .Values.imageInfo.machineAgentTag }} + globalAccount: {{ required "AppDynamics controller global account is required when using machine-agent linux image!" .Values.controllerInfo.globalAccount }} + {{- else if eq .Values.infraViz.nodeOS "windows" }} + imageWin: {{ .Values.imageInfo.machineAgentWinImage }}:{{ .Values.imageInfo.machineAgentWinTag }} + globalAccount: {{ required "AppDynamics controller global account is required when using machine-agent win image!" 
.Values.controllerInfo.globalAccount }} + {{- else if eq .Values.infraViz.nodeOS "all" }} + image: {{ .Values.imageInfo.machineAgentImage }}:{{ .Values.imageInfo.machineAgentTag }} + imageWin: {{ .Values.imageInfo.machineAgentWinImage }}:{{ .Values.imageInfo.machineAgentWinTag }} + globalAccount: {{ required "AppDynamics controller global account is required when using machine-agent win image!" .Values.controllerInfo.globalAccount }} + {{- end }} + + {{ with .Values.controllerInfo -}} + {{ if .proxyUrl -}} + # Proxy config + proxyUrl: {{ .proxyUrl }} + {{ if .authenticateProxy -}} + proxyUser: {{ required "Proxy user is required to authenticate proxy!" .proxyUser }} + {{- end }} + {{- end }} + {{- end }} + + + {{ with .Values.infraViz -}} + nodeOS: {{ .nodeOS -}} + {{ if .enableMasters }} + enableMasters: {{ .enableMasters -}} + {{- end }} + {{ if .stdoutLogging -}} + stdoutLogging: {{ .stdoutLogging -}} + {{- end }} + {{ if .enableContainerHostId -}} + enableContainerHostId: {{ .enableContainerHostId -}} + {{- end }} + {{ if .runAsUser -}} + runAsUser: {{ .runAsUser -}} + {{- end }} + {{ if .runAsGroup -}} + runAsGroup: {{ .runAsGroup -}} + {{- end }} + {{ if .fsGroup -}} + fsGroup: {{ .fsGroup -}} + {{- end }} + {{ if .enableServerViz -}} + enableServerViz: {{ .enableServerViz -}} + {{- end }} + {{ if .enableDockerViz -}} + enableDockerViz: {{ .enableDockerViz -}} + {{- end }} + {{ if .eventServiceUrl -}} + eventServiceUrl: {{ .eventServiceUrl -}} + {{- end }} + + # Log specific properties + {{ with .logProperties -}} + logLevel: {{ .logLevel }} + {{- end }} + + {{ with .metricProperties -}} + metricsLimit: {{ .metricsLimit | quote -}} + {{- end }} + + {{ if .propertyBag -}} + propertyBag: {{ .propertyBag -}} + {{- end }} + + {{ if .uniqueHostId -}} + uniqueHostId: {{ .uniqueHostId -}} + {{- end }} + + {{- end }} + + {{ if .Values.netViz.enabled -}} + netVizImage: {{ .Values.imageInfo.netVizImage }}:{{ .Values.imageInfo.netvizTag }} + netVizPort: {{ 
.Values.netViz.netVizPort -}} + {{- end}} + {{ if .Values.infravizPod.imagePullSecret -}} + imagePullSecret: {{ .Values.infravizPod.imagePullSecret }} + {{- end }} + {{ if .Values.infravizPod.imagePullPolicy -}} + imagePullPolicy: {{ .Values.infravizPod.imagePullPolicy }} + {{- end }} + nodeSelector: + {{- toYaml .Values.infravizPod.nodeSelector | nindent 6 }} + priorityClassName: + {{- toYaml .Values.infravizPod.priorityClassName | nindent 6 }} + # Tolerations + tolerations: + {{- toYaml .Values.infravizPod.tolerations | nindent 6 }} + # Resources + resources: + {{- toYaml .Values.infravizPod.resources | nindent 6 }} + {{ if .Values.infravizPod.overrideVolumeMounts -}} + overrideVolumeMounts: + {{- toYaml .Values.infravizPod.overrideVolumeMounts | nindent 6 }} + {{ end -}} + {{ if .Values.infravizPod.env -}} + env: + {{- toYaml .Values.infravizPod.env | nindent 6 }} + {{ end -}} + #netviz resources + resourcesNetViz: + {{- toYaml .Values.netViz.resourcesNetViz | nindent 6 }} +{{ end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/cr-agent-generic.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/cr-agent-generic.yaml new file mode 100644 index 00000000..d0470b17 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/cr-agent-generic.yaml @@ -0,0 +1,73 @@ +{{ if .Values.installClusterAgent -}} +{{ $releaseName := .Release.Name}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: appdynamics-cluster-agent-{{.Release.Name}} +rules: +- apiGroups: + - "" + resources: + - pods + - pods/log + - endpoints + - persistentvolumeclaims + - resourcequotas + - nodes + - events + - services + - configmaps + - secrets + - replicationcontrollers + verbs: + - get + - watch + - list +- 
apiGroups: + - apps + resources: + - daemonsets + - statefulsets + - deployments + - replicasets + verbs: + - get + - watch + - list +- apiGroups: + - "batch" + - "extensions" + resources: + - "jobs" + verbs: + - "get" + - "list" + - "watch" +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - update + - list +{{ if .Capabilities.APIVersions.Has "apps.openshift.io/v1/DeploymentConfig" -}} +- apiGroups: + - apps.openshift.io + resources: + - deploymentconfigs + verbs: + - get + - watch + - list +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/cr-agent-instrumentation.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/cr-agent-instrumentation.yaml new file mode 100644 index 00000000..a9e2dfe7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/cr-agent-instrumentation.yaml @@ -0,0 +1,66 @@ +{{ if .Values.installClusterAgent -}} +{{ $releaseName := .Release.Name}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: appdynamics-cluster-agent-instrumentation-{{.Release.Name}} +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + - secrets + - configmaps + verbs: + - create + - update + - delete +- apiGroups: + - apps + resources: + - daemonsets + - statefulsets + - deployments + - replicasets + verbs: + - update +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - update + - list +{{ if .Capabilities.APIVersions.Has "apps.openshift.io/v1/DeploymentConfig" -}} +- apiGroups: + - apps.openshift.io + resources: + - deploymentconfigs + verbs: + - update +{{- end }} +{{ with .containerAppCorrelationMethod }} +{{ if 
eq . "kubeapi" }} +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - create + - delete +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + verbs: + - get + - create + - update + - delete +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/crb-agent-generic.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/crb-agent-generic.yaml new file mode 100644 index 00000000..a258183b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/crb-agent-generic.yaml @@ -0,0 +1,16 @@ +{{ if .Values.installClusterAgent -}} +{{ $agentSA := .Values.agentServiceAccount }} +{{ $namespace := .Release.Namespace }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ printf "%s-%s" .Release.Name "appdynamics-cluster-agent" }} +subjects: +{{ printf "- kind: ServiceAccount" | nindent 2 }} +{{ printf "name: %s" $agentSA | nindent 4 }} +{{ printf "namespace: %s" $namespace | nindent 4}} +roleRef: + kind: ClusterRole + name: appdynamics-cluster-agent-{{.Release.Name}} + apiGroup: rbac.authorization.k8s.io +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/crb-agent-instrumentation.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/crb-agent-instrumentation.yaml new file mode 100644 index 00000000..6d703554 --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/crb-agent-instrumentation.yaml @@ -0,0 +1,16 @@ +{{ if .Values.installClusterAgent -}} +{{ $agentSA := .Values.agentServiceAccount }} +{{ $namespace := .Release.Namespace }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: appdynamics-cluster-agent-instrumentation-{{.Release.Name}} +subjects: + {{- printf "- kind: ServiceAccount" | nindent 2 }} + {{- printf "name: %s" $agentSA | nindent 4 }} + {{- printf "namespace: %s" $namespace | nindent 4}} +roleRef: + kind: ClusterRole + name: appdynamics-cluster-agent-instrumentation-{{.Release.Name}} + apiGroup: rbac.authorization.k8s.io +{{- end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/infraviz-security-context-constraint-openshift.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/infraviz-security-context-constraint-openshift.yaml new file mode 100644 index 00000000..93a65bd3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/infraviz-security-context-constraint-openshift.yaml @@ -0,0 +1,35 @@ +{{ if .Values.installInfraViz -}} +{{ if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" -}} +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: appdynamics-infraviz +allowHostDirVolumePlugin: true +allowHostIPC: true +allowHostNetwork: true +allowHostPID: true +allowHostPorts: true +allowPrivilegedContainer: true +allowedCapabilities: +- '*' +allowedFlexVolumes: null +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +priority: null +readOnlyRootFilesystem: false 
+requiredDropCapabilities: null +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: +- '*' +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:appdynamics:appdynamics-infraviz +volumes: +- '*' +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/sa-agent.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/sa-agent.yaml new file mode 100644 index 00000000..3eb71ddc --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/sa-agent.yaml @@ -0,0 +1,9 @@ +{{ if .Values.installClusterAgent -}} +{{ if .Values.createServiceAccount -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.agentServiceAccount }} + namespace: {{ .Release.Namespace }} +{{ end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/sa-infraviz.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/sa-infraviz.yaml new file mode 100644 index 00000000..410c3bd8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/rbac/sa-infraviz.yaml @@ -0,0 +1,7 @@ +{{ if .Values.installInfraViz -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.infravizServiceAccount }} + namespace: {{ .Release.Namespace }} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-cluster-agent.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-cluster-agent.yaml new file mode 100644 index 00000000..a4483080 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-cluster-agent.yaml @@ -0,0 +1,27 @@ +{{ $secret := (lookup "v1" "Secret" .Release.Namespace "cluster-agent-secret") }} +{{ $annotations := dict "temp" "temp" }} +{{ if $secret }} + {{ $annotations = $secret.metadata.annotations }} +{{ end }} +{{ if or (not ($secret)) (get $annotations "appdynamics.helm.charts/created-by") }} +{{ $namespace := .Release.Namespace }} +{{ if .Values.installClusterAgent -}} +apiVersion: v1 +kind: Secret +metadata: + name: cluster-agent-secret + namespace: {{ $namespace }} + annotations: + appdynamics.helm.charts/created-by: cluster-agent-helm-chart +type: Opaque +data: + {{ with .Values.controllerInfo -}} + controller-key: {{ include "appdynamics-csaas-k8s-cluster-agent.sensitiveData" (dict "data" .accessKey "message" "AppDynamics controller access key is required!") }} + {{- end -}} + {{ if .Values.instrumentationConfig.enabled -}} + {{ with .Values.controllerInfo }} + api-user: {{ cat (.username | trim | required "AppDynamics controller username is required!") "@" (.account | trim | required "AppDynamics controller account is required!") ":" (.password | trim | required "Appdynamics controller password is required!") | nospace | b64enc -}} + {{- end -}} + {{- end -}} +{{ end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-custom-ssl.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-custom-ssl.yaml new file mode 100644 index 00000000..ce4d9999 --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-custom-ssl.yaml @@ -0,0 +1,22 @@ +{{ $secret := (lookup "v1" "Secret" .Release.Namespace "cluster-agent-ssl-cert") }} +{{ $annotations := dict "temp" "temp" }} +{{ if $secret }} + {{ $annotations = $secret.metadata.annotations }} +{{ end }} +{{ if or (not ($secret)) (get $annotations "appdynamics.helm.charts/created-by") }} +{{ $namespace := .Release.Namespace }} +{{ with .Values -}} +{{ if .controllerInfo.customSSLCert -}} +apiVersion: v1 +kind: Secret +metadata: + name: cluster-agent-ssl-cert + namespace: {{ $namespace }} + annotations: + appdynamics.helm.charts/created-by: cluster-agent-helm-chart +type: Opaque +data: + "custom-ssl.pem": {{ .controllerInfo.customSSLCert }} +{{ end -}} +{{ end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-proxy-secret.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-proxy-secret.yaml new file mode 100644 index 00000000..18a61ded --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/templates/secret-proxy-secret.yaml @@ -0,0 +1,25 @@ +{{ $secret := (lookup "v1" "Secret" .Release.Namespace "cluster-agent-proxy-secret") }} +{{ $annotations := dict "temp" "temp" }} +{{ if $secret }} + {{ $annotations = $secret.metadata.annotations }} +{{ end }} +{{ if or (not ($secret)) (get $annotations "appdynamics.helm.charts/created-by") }} +{{ $namespace := .Release.Namespace }} +{{ with .Values -}} +{{ if .controllerInfo.authenticateProxy -}} +{{ if not .controllerInfo.proxyUrl -}} +{{ fail "Proxy url is requried to authenticate proxy!" 
-}} +{{ end -}} +apiVersion: v1 +kind: Secret +metadata: + name: cluster-agent-proxy-secret + namespace: {{ $namespace }} + annotations: + appdynamics.helm.charts/created-by: cluster-agent-helm-chart +type: Opaque +data: + proxy-password: {{ include "appdynamics-csaas-k8s-cluster-agent.sensitiveData" (dict "data" .controllerInfo.proxyPassword "message" "Proxy password is required!") }} +{{ end -}} +{{ end -}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/values.yaml new file mode 100644 index 00000000..06a84977 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-csaas-k8s-cluster-agent/values.yaml @@ -0,0 +1,136 @@ +installClusterAgent: false +installInfraViz: false + +# Docker images +imageInfo: + agentImage: docker.io/appdynamics/cluster-agent + agentTag: 24.1.0 + imagePullPolicy: Always # Will be used for operator pod + machineAgentImage: docker.io/appdynamics/machine-agent + machineAgentTag: latest + machineAgentWinImage: docker.io/appdynamics/machine-agent-analytics + machineAgentWinTag: win-latest + netVizImage: docker.io/appdynamics/machine-agent-netviz + netvizTag: latest + +# AppDynamics controller info (VALUES TO BE PROVIDED BY THE USER) +controllerInfo: + url: null + account: null + username: null + password: null + accessKey: null + globalAccount: null # To be provided when using machineAgent Window Image + + # Cluster Agent SSL properties + customSSLCert: null + # InfraViz SSL properties + keystorePasswordSecret: "" + keyStoreFileSecret: "" + + # Proxy config + authenticateProxy: false + proxyUrl: null + proxyUser: null + proxyPassword: null + +# RBAC config +createServiceAccount: true +agentServiceAccount: appdynamics-cluster-agent +infravizServiceAccount: 
appdynamics-infraviz + +# Cluster agent config +clusterAgent: + nsToMonitor: + - default + clusterMetricsSyncInterval: 60 + metadataSyncInterval: 60 + eventUploadInterval: 10 + httpClientTimeout: 30 + podBatchSize: 6 + instrumentationMaxPollingAttempts: 10 + instrumentationNsStatusPollingIntervalMinutes: 5 + imagePullPolicy: "" + imagePullSecret: "" + memoryThresholdForRestart: 90 + containerProperties: + containerBatchSize: 5 + containerParallelRequestLimit: 1 + containerRegistrationInterval: 120 + logProperties: + logFileSizeMb: 5 + logFileBackups: 3 + logLevel: INFO + metricProperties: + metricsSyncInterval: 30 + metricUploadRetryCount: 2 + metricUploadRetryIntervalMilliSeconds: 5 + +# Profiling specific config - set pprofEnabled true if profiling need to be enabled, +# provide pprofPort if you need different port else default port 9991 will be assigned +agentProfiler: + pprofEnabled: false + pprofPort: 9991 + +# Pod filter config +podFilter: {} + +# Instrumentation config +instrumentationConfig: + enabled: false + containerAppCorrelationMethod: proxy + +# InfraViz config +infraViz: + nodeOS: "linux" + enableMasters: false + stdoutLogging: false + enableContainerHostId: true + enableServerViz: false + enableDockerViz: false + +# Netviz config +netViz: + enabled: false + netVizPort: 3892 + resourcesNetViz: + limits: + cpu: 200m + memory: "300Mi" + requests: + cpu: 100m + memory: "150Mi" + +# Agent pod specific properties +agentPod: + nodeSelector: {} + tolerations: [] + resources: + limits: + cpu: "1250m" + memory: "300Mi" + requests: + cpu: "750m" + memory: "150Mi" + labels: {} + +# Infraviz pod specific properties +infravizPod: + imagePullSecret: "" + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + priorityClassName: "" + resources: + limits: + cpu: 500m + memory: "1G" + requests: + cpu: 200m + memory: "800M" + overrideVolumeMounts: + env: + +# Subcharts boolean install switches +install: + metrics-server: false diff --git 
a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/Chart.yaml new file mode 100644 index 00000000..bcdd771b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 24.2.0 +description: AppDynamics Network Monitoring Helm chart for Kubernetes +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-network-monitoring +version: 0.2.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/_helpers.tpl new file mode 100644 index 00000000..673b7445 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/_helpers.tpl @@ -0,0 +1,66 @@ +{{/* +Fully qualified app name for the kernel-collector daemonset. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "appdynamics-network-monitoring-kernel-collector.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-kernel-collector" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-kernel-collector" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the kernel-collector +*/}} +{{- define "appdynamics-network-monitoring-kernel-collector.serviceAccountName" -}} +{{- if .Values.kernelCollector.serviceAccount.create }} +{{- default (include "appdynamics-network-monitoring-kernel-collector.fullname" .) .Values.kernelCollector.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.kernelCollector.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Fully qualified app name for the k8s-collector deployment. +*/}} +{{- define "appdynamics-network-monitoring-k8s-collector.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-k8s-collector" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-k8s-collector" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the k8s-collector +*/}} +{{- define "appdynamics-network-monitoring-k8s-collector.serviceAccountName" -}} +{{- if .Values.k8sCollector.serviceAccount.create }} +{{- default (include "appdynamics-network-monitoring-k8s-collector.fullname" .) .Values.k8sCollector.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.k8sCollector.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Fully qualified app name for the reducer deployment. 
+*/}} +{{- define "appdynamics-network-monitoring-reducer.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-reducer" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-reducer" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "appdynamics-network-monitoring.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-clusterrole.yaml new file mode 100644 index 00000000..7831cf12 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . 
}} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-clusterrolebinding.yaml new file mode 100644 index 00000000..5cf0bda8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-clusterrolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "appdynamics-network-monitoring-k8s-collector.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-deployment.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-deployment.yaml new file mode 100644 index 00000000..0fe529ea --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-deployment.yaml @@ -0,0 +1,81 @@ +{{- if and .Values.enabled .Values.k8sCollector.enabled }} +# The k8s-collector consists of two services: +# 1) k8s-watcher: talks to the Kubernetes API server to determine the current state of +# the cluster; sets up watches to be notified of subsequent changes to pods, services +# and other resources. +# 2) k8s-relay: relays the information collected by k8s-watcher to the reducer. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + labels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + helm.sh/chart: {{ include "appdynamics-network-monitoring.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- with .Values.k8sCollector.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8}} + {{- end }} + containers: + - image: "{{ .Values.k8sCollector.watcher.image }}" + imagePullPolicy: {{ .Values.k8sCollector.watcher.imagePullPolicy }} + name: k8s-watcher + {{- if hasKey .Values.k8sCollector.watcher "resources" }} + resources: + {{- toYaml .Values.k8sCollector.watcher.resources | nindent 10 }} + {{- end }} + args: + - --log-console + {{- if hasKey .Values.log "level" }} + - --log-level={{ .Values.log.level }} + {{- end }} + - image: "{{ .Values.k8sCollector.relay.image }}" + imagePullPolicy: {{ .Values.k8sCollector.relay.imagePullPolicy }} + name: k8s-relay + {{- if hasKey .Values.k8sCollector.relay "resources" }} + resources: + {{- toYaml .Values.k8sCollector.relay.resources | nindent 10 }} + {{- end }} + args: + {{- if .Values.log.console }} + - --log-console + {{- end }} + {{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} + {{- end }} + env: + - name: "EBPF_NET_INTAKE_HOST" + value: {{ include "appdynamics-network-monitoring-reducer.fullname" . }} + - name: "EBPF_NET_INTAKE_PORT" + value: "{{ default 7000 .Values.reducer.telemetryPort }}" + terminationGracePeriodSeconds: 30 + securityContext: {} + serviceAccountName: {{ include "appdynamics-network-monitoring-k8s-collector.serviceAccountName" . }} + {{- with .Values.k8sCollector.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.k8sCollector.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.k8sCollector.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-serviceaccount.yaml new file mode 100644 index 00000000..d09a4e16 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/k8s-collector-serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and .Values.enabled .Values.k8sCollector.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ include "appdynamics-network-monitoring-k8s-collector.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + name: {{ include "appdynamics-network-monitoring-k8s-collector.serviceAccountName" . }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/kernel-collector-daemonset.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/kernel-collector-daemonset.yaml new file mode 100644 index 00000000..fc0b5392 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/kernel-collector-daemonset.yaml @@ -0,0 +1,92 @@ +{{- if and .Values.enabled .Values.kernelCollector.enabled }} +# kernel collector daemonset: deploys the kernel collector to each node in the cluster. +# The kernel collector needs to be able to compile and install +# eBPF programs in the node's kernel, so needs to run as root and +# needs to mount /lib/modules and /usr/src from the node itself. +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "appdynamics-network-monitoring-kernel-collector.fullname" . 
}} + labels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-kernel-collector.fullname" . }} + helm.sh/chart: {{ include "appdynamics-network-monitoring.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-kernel-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-kernel-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- with .Values.kernelCollector.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8}} + {{- end }} + containers: + - name: kernel-collector + image: "{{ .Values.kernelCollector.image }}" + imagePullPolicy: {{ .Values.kernelCollector.imagePullPolicy }} + args: + - --disable-nomad-metadata + {{- if .Values.log.console }} + - --log-console + {{- end }} + {{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} + {{- end }} + env: + - name: "EBPF_NET_KERNEL_HEADERS_AUTO_FETCH" + value: "true" + - name: "EBPF_NET_INTAKE_HOST" + value: {{ include "appdynamics-network-monitoring-reducer.fullname" . 
}} + - name: "EBPF_NET_INTAKE_PORT" + value: "{{ default 7000 .Values.reducer.telemetryPort }}" + - name: "EBPF_NET_HOST_DIR" + value: "/hostfs" + {{- if .Values.kernelCollector.env }} + {{- toYaml .Values.kernelCollector.env | nindent 12 }} + {{- end }} + {{- if hasKey .Values.kernelCollector "resources" }} + resources: + {{- toYaml .Values.kernelCollector.resources | nindent 12 }} + {{- end }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /hostfs/ + name: host-root + readOnly: true + - mountPath: /hostfs/var/cache + name: host-var-cache + readOnly: false + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + serviceAccountName: {{ include "appdynamics-network-monitoring-kernel-collector.serviceAccountName" . }} + volumes: + - name: host-root + hostPath: + path: / + type: Directory + - name: host-var-cache + hostPath: + path: /var/cache + type: DirectoryOrCreate + {{- with .Values.kernelCollector.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.kernelCollector.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.kernelCollector.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/kernel-collector-serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/kernel-collector-serviceaccount.yaml new file mode 100644 index 00000000..1fc03c4e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/kernel-collector-serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and .Values.enabled .Values.kernelCollector.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ include "appdynamics-network-monitoring-kernel-collector.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + name: {{ include "appdynamics-network-monitoring-kernel-collector.serviceAccountName" . }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/reducer-deployment.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/reducer-deployment.yaml new file mode 100644 index 00000000..6717f52d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/reducer-deployment.yaml @@ -0,0 +1,82 @@ +{{- if .Values.enabled}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "appdynamics-network-monitoring-reducer.fullname" . }} + labels: + helm.sh/chart: {{ include "appdynamics-network-monitoring.chart" . }} + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-reducer.fullname" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-reducer.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-reducer.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + {{- with .Values.reducer.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8}} + {{- end }} + containers: + - name: reducer + image: "{{ .Values.reducer.image }}" + imagePullPolicy: {{ .Values.reducer.imagePullPolicy }} + args: + - --port={{- .Values.reducer.telemetryPort }} + - --log-console + - --no-log-file + {{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} + {{- end }} + - --disable-prometheus-metrics + - --enable-otlp-grpc-metrics + - --otlp-grpc-metrics-host=appdynamics-otel-collector-service + - --otlp-grpc-metrics-port=24317 + {{- if .Values.reducer.disableMetrics }} + - --disable-metrics={{join "," .Values.reducer.disableMetrics}} + {{- end }} + {{- if .Values.reducer.enableMetrics }} + - --enable-metrics={{join "," .Values.reducer.enableMetrics}} + {{- end }} + {{- if .Values.reducer.ingestShards }} + - --num-ingest-shards={{- .Values.reducer.ingestShards }} + {{- end }} + {{- if .Values.reducer.matchingShards }} + - --num-matching-shards={{- .Values.reducer.matchingShards }} + {{- end }} + {{- if .Values.reducer.aggregationShards }} + - --num-aggregation-shards={{- .Values.reducer.aggregationShards }} + {{- end }} + ports: + - name: telemetry + containerPort: {{ .Values.reducer.telemetryPort }} + protocol: TCP + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 5 + exec: + command: ['/srv/health_check.sh', 'readiness_probe', 'localhost', {{ quote 
.Values.reducer.telemetryPort }}] + resources: + {{- toYaml .Values.reducer.resources | nindent 12 }} + {{- with .Values.reducer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.reducer.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.reducer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/reducer-service.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/reducer-service.yaml new file mode 100644 index 00000000..681dcbb5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/templates/reducer-service.yaml @@ -0,0 +1,22 @@ +{{- if .Values.enabled}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "appdynamics-network-monitoring-reducer.fullname" . }} + labels: + helm.sh/chart: {{ include "appdynamics-network-monitoring.chart" . }} + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-reducer.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: {{ include "appdynamics-network-monitoring-reducer.fullname" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + - name: telemetry + port: {{ .Values.reducer.telemetryPort }} + targetPort: telemetry + protocol: TCP +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/values.schema.json b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/values.schema.json new file mode 100644 index 00000000..29806acf --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/values.schema.json @@ -0,0 +1,155 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "title": "Values", + "description": "AppDynamics Network Monitoring Configuration", + "additionalProperties": false, + "properties": { + "global": { + "type": "object", + "properties": { + "clusterName": { + "description": "The name for cluster where the collectors and target allocator are deployed", + "type": "string" + }, + "clusterId": { + "description": "The uid of kube-system namespace, required when helm lookup is not supported", + "type": "string" + } + }, + "required": [ + "clusterName" + ] + }, + "enabled": { + "description": "Enable network monitoring", + "type": "boolean" + }, + "nameOverride": { + "description": "Override name of the chart used in Kubernetes object names.", + "type": "string" + }, + "log": { + "type": "object", + "additionalProperties": false, + "properties": { + "console": { + "type": "boolean" + }, + "level": { + "type": "string", + "enum": [ + "error", + "warning", + "info", + "debug", + "trace" + ] + } + } + }, + "kernelCollector": { + "type": "object", + "properties": { + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string", + "enum": [ + "IfNotPresent", + "Always", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "string" + } + } + }, + 
"additionalProperties": true + }, + "k8sCollector": { + "type": "object", + "properties": { + "relay": { + "type": "object", + "properties": { + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string", + "enum": [ + "IfNotPresent", + "Always", + "Never" + ] + } + }, + "additionalProperties": true + }, + "watcher": { + "type": "object", + "properties": { + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string", + "enum": [ + "IfNotPresent", + "Always", + "Never" + ] + } + }, + "additionalProperties": true + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": true + }, + "reducer": { + "type": "object", + "properties": { + "image": { + "type": "string" + }, + "imagePullPolicy": { + "type": "string", + "enum": [ + "IfNotPresent", + "Always", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": true + }, + "rbac": { + "type": "object", + "additionalProperties": false, + "properties": { + "create": { + "type": "boolean" + } + } + } + }, + "required": [], + "anyOf": [] +} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/values.yaml new file mode 100644 index 00000000..92e150ed --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-network-monitoring/values.yaml @@ -0,0 +1,80 @@ +global: + clusterName: "" + tls: + otelReceiver: + secret: {} + settings: {} + otelExporter: + secret: {} + settings: {} + +enabled: false +nameOverride: "" + +log: + console: false + # possible values: { error | warning | info | debug | trace } + level: warning + +kernelCollector: + enabled: true + image: 
otel/opentelemetry-ebpf-kernel-collector:v0.10.2 + imagePullPolicy: Always + imagePullSecrets: [] + nodeSelector: + kubernetes.io/arch: amd64 + kubernetes.io/os: linux + + serviceAccount: + create: false + name: "" + + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + + affinity: {} + resources: {} + +k8sCollector: + enabled: true + relay: + image: otel/opentelemetry-ebpf-k8s-relay:v0.10.2 + imagePullPolicy: Always + resources: {} + watcher: + image: otel/opentelemetry-ebpf-k8s-watcher:v0.10.2 + imagePullPolicy: Always + resources: {} + imagePullSecrets: [] + nodeSelector: + kubernetes.io/arch: amd64 + kubernetes.io/os: linux + affinity: {} + tolerations: [] + serviceAccount: + create: false + name: "" + +reducer: + ingestShards: 1 + matchingShards: 1 + aggregationShards: 1 + disableMetrics: + - ebpf_net.all + enableMetrics: [] + telemetryPort: 7000 + image: otel/opentelemetry-ebpf-reducer:v0.10.2 + imagePullPolicy: Always + imagePullSecrets: [] + resources: {} + nodeSelector: + kubernetes.io/arch: amd64 + kubernetes.io/os: linux + affinity: {} + tolerations: [] + +rbac: + create: true diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/Chart.yaml new file mode 100644 index 00000000..6dd0d4df --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +appVersion: 24.7.0-1646 +description: AppDynamics distributed Opentelemetry Collector Helm chart for Kubernetes +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-otel-collector +version: 24.7.0-1646 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/README.md 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/README.md new file mode 100644 index 00000000..5e30eeaa --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/README.md @@ -0,0 +1,71 @@ +# Helm chart for Appd collector with Opentelemetry Operator + +## Required Values +Some components, if used, required user input value. + +### OtlpHttp Exporter +OtlpHttp exporter need to specify backend endpoint and Ouath2 related properties: +These are required values: +```yaml +clientId: "id" # clientId for oauth2 extension +tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension +endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporeter +``` +Client secret can be presented in plain text(clientSecret) or environment variable(clientSecretEnvVar) and at least one of them must be provided. +when clientSecret and clientSecretEnvVar are both provided, clientSecret will be used. +```yaml +# clientSecret plain text for oauth2 extension +clientSecret: "secret" + +# clientSecret set by environment variable for oauth2 extension +# When using this format, the value will be set to environment variable APPD_OTELCOL_CLIENT_SECRET, and +# collector config will read the secret from ${APPD_OTELCOL_CLIENT_SECRET}. +clientSecretEnvVar: + value: "secret" +``` +clientSecretEnvVar can be used for kubernetes secret. +```yaml +# clientSecret set by environment variable which value is read from kubernetes secret. 
+clientSecret: + valueFrom: + secretKeyRef: + name: "oauth-client-secret" + key: "secret" +``` +example for configuring values by helm command line: +```shell +helm install release-name appdynamics-otel-collector \ + --set clientId="clientId" \ + --set clientSecretEnvVar.secretKeyRef.name="oauth-client-secret" \ + --set clientSecretEnvVar.secretKeyRef.key="secret" \ + --set tokenUrl="https://example-token-url" \ + --set endpoint="https://example:443/v1beta/metrics" +``` +You can also disable Oauth2 for testing, please be aware you will also need to remove all related configs manually include exporter config and pipeline. +see examples/remove_oauth.yaml. + +### Prometheus Receiver +Prometheus receiver if used, must have at least one scrape config, following showed an example k8s pod discovery config. +```yaml +prometheus: + config: + scrape_configs: + - job_name: k8s + kubernetes_sd_configs: + - role: pod + # namespace must be manually specified, otherwise prometheus will explore all namespaces. + namespaces: + names: [ "default" ] + relabel_configs: + - source_labels: [ __meta_kubernetes_pod_annotation_prometheus_io_scrape ] + regex: "true" + action: keep +``` +If using k8s discovery scape, don't forget to give necessary rbac rules. 
+```yaml +rbac: + rules: + - apiGroups: [ "" ] + resources: [ "pods" ] + verbs: [ "get", "list", "watch" ] +``` \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/client_secret_env.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/client_secret_env.yaml new file mode 100644 index 00000000..35e42b30 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/client_secret_env.yaml @@ -0,0 +1,8 @@ +clientId: "id" +clientSecretEnvVar: + valueFrom: + secretKeyRef: + name: "oauth-client-secret" + key: "secret" +tokenUrl: "https://token_ur.com/oauth2l" +endpoint: "https://data.appdynamics.com" \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/headless_service.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/headless_service.yaml new file mode 100644 index 00000000..d56d949f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/headless_service.yaml @@ -0,0 +1,10 @@ +# required +endpoint: "https://data.appdynamics.com" +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" + +# If your cluster does not deploy the service mesh that can do http/2 load balancing for grpc, +# we recommended to deploy the otel collector with a headless service to enable the grpc client load balance. 
+service: + clusterIP: None \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/instrumentation.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/instrumentation.yaml new file mode 100644 index 00000000..0b676525 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/instrumentation.yaml @@ -0,0 +1,12 @@ +# required +endpoint: "https://data.appdynamics.com" +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" + +# add namespace you want to instrument +# The resources you want to instrument should have annotation: +# instrumentation.opentelemetry.io/inject-: "true" +instrumentation: + namespaces: + default: diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/instrumentation_tls.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/instrumentation_tls.yaml new file mode 100644 index 00000000..a730cc25 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/instrumentation_tls.yaml @@ -0,0 +1,33 @@ +global: + tls: + otelReceiver: + secret: + secretName: root-secret + secretKeys: + tlsCert: tls.crt + tlsKey: tls.key + settings: + min_version: 1.2 + max_version: 1.3 +# required +endpoint: "https://data.appdynamics.com" +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" + +# add namespace you want to instrument +# The resources you want to instrument should have annotation: +# instrumentation.opentelemetry.io/inject-: "true" +instrumentation: +# by default insecure is true, when insecure is false, tls will be used to protect the communication with AppDynamics 
Distribution of OpenTelemetry Collector. + insecure: false + namespaces: + default: + metadata: + labels: + my-label: tls + spec: + java: + env: + - name: OTEL_EXPORTER_OTLP_CERTIFICATE # The tls.crt should contain dns name appdynamics-otel-collector-service.appdynamics.svc.cluster.local + value: /etc/agent/cert/tls.crt # path to the crt should exist in the instrumented pod. \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/not_installed.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/not_installed.yaml new file mode 100644 index 00000000..767c5b28 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/not_installed.yaml @@ -0,0 +1,2 @@ +# Sample values.yaml received by this sub-chart when flag is set to false +install: false \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/prometheus.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/prometheus.yaml new file mode 100644 index 00000000..ad2bd1d6 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/prometheus.yaml @@ -0,0 +1,11 @@ +# required +global: + clusterName: "my-cluster" +endpoint: "https://data.appdynamics.com" +clientId: "dummy" +clientSecret: "dummy" +tokenUrl: "dummy" +enablePrometheus: true +# Replicas must set to enable the collector deployment, you can also set minReplicas and maxReplicas to enable autoscaling +spec: + replicas: 2 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/prometheus_and_filelog.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/prometheus_and_filelog.yaml new file mode 100644 index 00000000..0d3bd528 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/prometheus_and_filelog.yaml @@ -0,0 +1,19 @@ +# required +global: + clusterName: "my-cluster" +endpoint: "https://data.appdynamics.com" +clientId: "dummy" +clientSecret: "dummy" +tokenUrl: "dummy" +enablePrometheus: true +enableFileLog: true +# set replicas for statefulset which gathers prometheus metrics, you can also set minReplicas and maxReplicas to enable autoscaling +mode: + statefulset: + spec: + replicas: 2 + daemonset: + configOverride: + receivers: + filelog: + include: ["/var/log/*/*/*/*.log", "/var/lib/docker/containers/*"] diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/remove_oauth.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/remove_oauth.yaml new file mode 100644 index 00000000..3e000cb5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/remove_oauth.yaml @@ -0,0 +1,17 @@ +# required +endpoint: "https://data.appdynamics.com" +# Dummy values for oauth2 is required. This is a test example to disable oauth2, do NOT use it in production. +clientId: "dummy" +clientSecret: "dummy" +tokenUrl: "dummy" + +configOverride: + # remove auth field from otlp exporter + exporters: + otlphttp: + auth: null + # to disbale oauth2 remove from service section. The below is the default. 
+ # service: + # extensions: [health_check, oauth2client] + service: + extensions: [health_check] diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/self_telemetry.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/self_telemetry.yaml new file mode 100644 index 00000000..c4a37f49 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/self_telemetry.yaml @@ -0,0 +1,11 @@ +# required +endpoint: "https://data.appdynamics.com" +clientId: "dummy" +clientSecret: "dummy" +tokenUrl: "dummy" + +# self telemetry +selfTelemetry: true +setPodUID: true +# selfTelemetryServiceName: "appd-otel-collector1" +# selfTelemetryServiceNamespace: "otelcol1" \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/simple.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/simple.yaml new file mode 100644 index 00000000..dc64aba2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/simple.yaml @@ -0,0 +1,11 @@ +# required +endpoint: "https://data.appdynamics.com" +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" +configOverride: + service: + pipelines: + traces: + exporters: [ otlphttp, logging ] + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tailsampling.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tailsampling.yaml new file mode 100644 index 00000000..92154992 --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tailsampling.yaml @@ -0,0 +1,62 @@ +global: + clusterName: clusterName + clusterId: testid + +# required +endpoint: "https://data.appdynamics.com" +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" + +spec: + image: "829771730735.dkr.ecr.us-west-2.amazonaws.com/otel/appdynamics-cloud-otel-collector:23.7.0-1075" + +presets: + samplerDebug: + enable: false + presampler: + enable: false + #pipeline: [memory_limiter, k8sattributes, consistent_proportional_sampler/presampler, batch/traces] + pipeline: [memory_limiter, k8sattributes, tracerecord/received, consistent_sampler/presampler, tracerecord/sampled, batch/traces] #replace with this pipeline when testing adding configured p value directly. + consistent_proportional_sampler: + export_period: 1s # the export period for specifying the expected output rate, it is for rate calculation only, NOT for batch interval. The batch interval can be configured at trace_classification_and_sampling.samplers.export_period, or you can add a batch processor before this. + spans_per_period: 100 # number of spans per request, the expected rate limit is calculated by dividing this number by export_period. The spans per packet is limited by the max packet size, assuming 1MB limit, and each span with size of 1KB + exponential_smooth: 0.1 # start with small number + initial_estimate_rate: 100 # number of incomming span rate, just give a reasonable guess. + rate_estimator: batch_rate_estimator + sample_mode: presampling + consistent_sampler: + p_value: 1 # user can configure a p value to add to the trace state directly, it is mainly for testing purpose + tailsampler: + enable: true + trace_classification_and_sampling: + decision_wait: 10s + # classification, example considers error, high latency and all other traces, each category will be rate limit separately. 
+ policies: + - name: errors-policy + type: status_code + sampler_name: "consistent_reservoir_sampler/error" + status_code: + status_codes: [ERROR] + - name: high-latency + type: latency + sampler_name: "consistent_reservoir_sampler/latency" + latency: + threshold_ms: 10000 + - name: always-on + type: always_sample + sampler_name: "consistent_reservoir_sampler/anyother" + samplers: + export_period: 1s # export interval. The overall request rate need to be multiplied the number of collector, tier one is 1000 req/sec, https://docs.appdynamics.com/fso/cloud-native-app-obs/en/licensing-for-cloud-native-application-observability/license-tokens-tiers-and-rate-limits + consistent_reservoir_sampler: + error: + reservoir_size: 10 + latency: + reservoir_size: 10 + anyother: + reservoir_size: 100 + consistent_proportional_sampler: + export_period: 1s # the export period for specifying the expected output rate, it is for rate calculation only, NOT for batch interval. The batch interval can be configured at trace_classification_and_sampling.samplers.export_period, or you can add a batch processor before this. + spans_per_period: 100 # number of spans per request, the expected rate limit is calculated by dividing this number by export_period. The spans per packet is limited by the max packet size, assuming 1MB limit, and each span with size of 1KB + exponential_smooth: 0.1 # start with small number + initial_estimate_rate: 100 # number of incomming span rate, just give a reasonable guess. 
diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_from_secret_exporter.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_from_secret_exporter.yaml new file mode 100644 index 00000000..35bee740 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_from_secret_exporter.yaml @@ -0,0 +1,20 @@ +# required +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" +endpoint: "https://data.appdynamics.com" + +global: + tls: + otelExporter: + secret: + path: /opt/appd + secretName: my_secret + secretKeys: + caCert: ca.crt + tlsCert: tls.crt + tlsKey: tls.key + settings: + insecure: false + min_version: 1.2 + max_version: 1.3 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_from_secret_receiver.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_from_secret_receiver.yaml new file mode 100644 index 00000000..1f53aae8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_from_secret_receiver.yaml @@ -0,0 +1,19 @@ +# required +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" +endpoint: "https://data.appdynamics.com" + +global: + tls: + otelReceiver: + secret: + secretName: otel-cert + secretKeys: + caCert: ca.crt + tlsCert: tls.crt + tlsKey: tls.key + settings: + min_version: 1.2 + max_version: 1.3 + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_values.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_values.yaml new file mode 100644 index 00000000..29abfd04 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/examples/tls_values.yaml @@ -0,0 +1,23 @@ +# required +clientId: "id" +clientSecret: "secret" +tokenUrl: "https://token_ur.com/oauth2l" +endpoint: "https://data.appdynamics.com" + +global: + tls: + otelReceiver: + settings: + min_version: 1.2 + max_version: 1.3 + ca_file: /etc/ssl/client.pem + cert_file: /etc/ssl/server.crt + key_file: /etc/ssl/server.key + #reload_interval: 5s + otelExporter: + settings: + insecure: false + cert_file: /etc/ssl/client.crt + key_file: /etc/ssl/client.key + min_version: "1.1" + max_version: "1.2" diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/NOTES.txt b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/NOTES.txt new file mode 100644 index 00000000..62dbfcdf --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/NOTES.txt @@ -0,0 +1,28 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +AppDynamics OpenTelemetry Collectors are listening for OTLP traffic at service "{{ .Values.service.name }}". + +The following components are enabled for logs, metrics and traces. 
+ +{{.Values.config.service | toYaml }} + + +Check the release status by running: + kubectl --namespace {{ .Release.Namespace }} get pods | grep -i {{ .Release.Name }} + +For details about OpenTelemetry Collector configuration, please go to +https://docs.appdynamics.com/latest/en/application-monitoring/appdynamics-for-opentelemetry/configure-the-opentelemetry-collector + + +THIRD PARTY LICENSE DISCLOSURE +=============================== +AppDynamics OpenTelemetry Collector +-------------------------------------------------- +https://www.cisco.com/c/dam/en_us/about/doing_business/open_source/docs/AppDynamics_Distribution_for_OpenTelemetry_Collector-2470-1721941458.pdf + + + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_agent_management_and_global_input.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_agent_management_and_global_input.tpl new file mode 100644 index 00000000..9acab4a2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_agent_management_and_global_input.tpl @@ -0,0 +1,107 @@ +{{- define "appdynamics-otel-collector.namespace" -}} +{{- if .Values.global.smartAgentInstall -}} +{{- default .Release.Namespace .Values.global.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.cluster.name" -}} +{{- if .Values.global.smartAgentInstall -}} +{{ "AGENT_PLATFORM_NAME_VALUE" }} +{{- else -}} +{{ required "clusterName needs to be specified" .Values.global.clusterName }} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.clusterId" -}} +{{- if .Values.global.smartAgentInstall -}} +{{ "AGENT_PLATFORM_ID_VALUE" }} +{{- else -}} +{{ (include "appdynamics-otel-collector.readClusterId" .) 
}} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.derivedOAuth" -}} +{{- if .Values.global.smartAgentInstall -}} +client_id: {{ "OAUTH_ID_VALUE" }} +client_secret: {{ "OAUTH_SECRET_PLAIN_VALUE" }} +token_url: {{ "OAUTH_URL_VALUE" }} +{{- else -}} +client_id: {{ .Values.clientId | default (.Values.global.oauth).clientId | required ".clientId is required" }} +token_url: {{ .Values.tokenUrl | default (.Values.global.oauth).tokenUrl | required ".tokenUrl is required" }} +{{- if .Values.clientSecret }} +client_secret: {{ .Values.clientSecret }} +{{- else if .Values.clientSecretEnvVar }} +client_secret: "${APPD_OTELCOL_CLIENT_SECRET}" +{{- else if .Values.clientSecretVolume }} +client_secret: {{ (include "appdynamics-otel-collector.clientSecretVolumePath" .Values.clientSecretVolume) | toYaml }} +{{- else if (.Values.global.oauth).clientSecretEnvVar }} +client_secret: "${APPD_OTELCOL_CLIENT_SECRET}" +{{- else }} +client_secret: {{required ".clientSecret is required" (.Values.global.oauth).clientSecret}} +{{- end }} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.endpoint" -}} +{{- if .Values.global.smartAgentInstall -}} +{{ "SERVICE_DOMAIN_VALUE" }}/data +{{- else -}} +{{ .Values.endpoint | default (.Values.global.oauth).endpoint | required ".endpoint is required" }} +{{- end }} +{{- end }} + +{{/* + Generate the secret environment variable for OAuth2.0 + */}} +{{- define "appdynamics-otel-collector.clientSecretEnvVar" -}} +{{- if .Values.clientSecretEnvVar -}} +name: APPD_OTELCOL_CLIENT_SECRET +{{- .Values.clientSecretEnvVar | toYaml | nindent 0}} +{{- else if (.Values.global.oauth).clientSecretEnvVar -}} +{{- (.Values.global.oauth).clientSecretEnvVar | toYaml | nindent 0}} +{{- end }} +{{- end }} + +{{/* httpProxy */}} +{{- define "appdynamics-otel-collector.client.agent.proxy" -}} +{{- if .Values.global.smartAgentInstall -}} +agent_http_proxy: {{ "AGENT_HTTP_PROXY_VALUE" }} +agent_https_proxy: {{ "AGENT_HTTPS_PROXY_VALUE" }} +{{- 
else }} +{{ with .Values.global.agentManagementProxy -}} +agent_http_proxy: {{ .httpProxy }} +agent_https_proxy: {{ .httpsProxy }} +agent_no_proxy: {{- toYaml .noProxy | nindent 4 }} +{{- end }} +{{- end }} +{{- end }} + +{{/* tenant id extracted from token url or from smart agent directly*/}} +{{- define "appdynamics-otel-collector.tenant.id" -}} +{{- if .Values.global.smartAgentInstall -}} +{{ "OAUTH_TENANT_ID_VALUE" }} +{{- else -}} +{{- if .Values.tenantId -}} +{{ .Values.tenantId }} +{{- else -}} +{{- $tokenUrl := .Values.tokenUrl | default (.Values.global.oauth).tokenUrl | required ".tokenUrl is required" -}} +{{- $authTenantId := (regexFind "\\/auth\\/[0-9a-z\\-]{36}" $tokenUrl) -}} +{{- if eq (len $authTenantId) (add (len "/auth/") 36) -}} +{{ substr (len "/auth/") (len $authTenantId) $authTenantId }} +{{- else -}} +{{- required "Please provide tenantId." "" }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.serviceURL" -}} +{{- if .Values.global.smartAgentInstall -}} +{{ "SERVICE_URL_VALUE" }} +{{- else -}} +{{- $endpoint := (include "appdynamics-otel-collector.endpoint" .) 
-}} +{{ substr 0 (int (sub (len $endpoint) (len "/data"))) $endpoint }}/rest/agent/service +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_config-sampler.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_config-sampler.tpl new file mode 100644 index 00000000..b3da10ab --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_config-sampler.tpl @@ -0,0 +1,152 @@ +{{- define "appdynamics-otel-collector.tailsampler.loadbalancing" -}} +processors: + batch/traces: + send_batch_size: 100 # too large size very impact loadbalancing exporter performance +exporters: + loadbalancing: + routing_key: "traceID" + protocol: + otlp: + compression: none + tls: + insecure: true + retry_on_failure: + max_interval: 5s + resolver: + k8s: + service: {{ .Values.presets.tailsampler.service.name -}}.{{ include "appdynamics-otel-collector.namespace" .}} + ports: [24317] +service: + pipelines: + traces: + exporters: [loadbalancing] +{{- end}} + +{{- define "appdynamics-otel-collector.tailsampler.sampler" -}} +receivers: + otlp/groupedtraces: + protocols: + grpc: + endpoint: 0.0.0.0:24317 + http: + endpoint: 0.0.0.0:24318 +processors: +{{- $deploy_mode := split "_" .Values.presets.tailsampler.deploy_mode }} +{{- if eq $deploy_mode._0 "sidecar" }} + k8sattributes: + passthrough: false +{{- end}} + trace_classification_and_sampling: +{{- .Values.presets.tailsampler.trace_classification_and_sampling | toYaml | nindent 4 }} + consistent_proportional_sampler: +{{- .Values.presets.tailsampler.consistent_proportional_sampler | toYaml | nindent 4 }} + groupbyattrs/compact: + groupbytrace: +{{- .Values.presets.tailsampler.groupbytrace | toYaml | nindent 4 }} + intermediate_sampler: +{{- .Values.presets.tailsampler.intermediate_sampler | toYaml | nindent 4 
}} + +service: + pipelines: + traces/sampler: + receivers: [otlp/groupedtraces] + processors: +{{- if eq $deploy_mode._0 "sidecar" }} +{{- .Values.presets.tailsampler.pipeline_sidecar_loadbalancer | toYaml | nindent 8}} +{{- else }} +{{- .Values.presets.tailsampler.pipeline | toYaml | nindent 8}} +{{- end}} + exporters: [otlphttp] +{{- end}} + +{{- define "appdynamics-otel-collector.tailsampler.tlsConfig.loadbalancing" -}} +{{- if .Values.global.tls.otelExporter.settings }} +exporters: + loadbalancing: + protocol: + otlp: + compression: none + tls: +{{- deepCopy .Values.global.tls.otelExporter.settings | toYaml | nindent 10}} +{{- end}} +{{- end}} + +{{- define "appdynamics-otel-collector.tailsampler.tlsConfig.tracegrouping" -}} +{{- if .Values.global.tls.otelReceiver.settings }} +receivers: + otlp/groupedtraces: + protocols: + grpc: + tls: +{{- deepCopy .Values.global.tls.otelReceiver.settings | toYaml | nindent 10}} + http: + tls: +{{- deepCopy .Values.global.tls.otelReceiver.settings | toYaml | nindent 10}} +{{- end}} +{{- end}} + +{{- define "appdynamics-otel-collector.tailsampler.samplingLoadBalancerDefaultPaths" -}} +{{- if .secret }} +{{ $path := .path | default "/etc/otel/certs/receiver"}} +{{- if .secret.secretKeys.caCert}} +ca_file: {{$path}}/{{.secret.secretKeys.caCert}} +{{- end}} +cert_file: {{$path}}/{{.secret.secretKeys.tlsCert}} +key_file: {{$path}}/{{.secret.secretKeys.tlsKey}} +{{- end}} +{{- end}} + +{{- define "appdynamics-otel-collector.tailsampler.tlsConfigFromSecrets.loadbalancing" -}} +{{- with .Values.global.tls.otelReceiver}} +{{- if .secret }} +exporters: + loadbalancing: + protocol: + otlp: + tls: +{{- (include "appdynamics-otel-collector.tailsampler.samplingLoadBalancerDefaultPaths" .) 
| nindent 10}} + insecure_skip_verify: true + insecure: false +{{- end}} +{{- end}} +{{- end}} + + +{{- define "appdynamics-otel-collector.tailsampler.tlsConfigFromSecrets.tracegrouping" -}} +{{- with .Values.global.tls.otelReceiver}} +receivers: + otlp/groupedtraces: + protocols: + grpc: + tls: +{{- (include "appdynamics-otel-collector.serverDefaultPaths" .) | nindent 10}} + http: + tls: +{{- (include "appdynamics-otel-collector.serverDefaultPaths" .) | nindent 10}} +{{- end}} +{{- end}} + +{{- define "appdynamics-otel-collector.presampler" -}} +{{- if .Values.presets.presampler.enable }} +processors: + consistent_proportional_sampler/presampler: +{{- .Values.presets.presampler.consistent_proportional_sampler | toYaml | nindent 4 }} + consistent_sampler/presampler: +{{- .Values.presets.presampler.consistent_sampler | toYaml | nindent 4 }} +service: + pipelines: + traces: + processors: +{{- if eq .Values.presets.presampler.deploy_mode "gateway"}} +{{- .Values.presets.presampler.pipeline | toYaml | nindent 8}} +{{- else }} +{{- .Values.presets.presampler.pipeline_sidecar | toYaml | nindent 8}} +{{- end }} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.samplerdebug" -}} +{{- if .Values.presets.samplerDebug.enable }} +{{- .Values.presets.samplerDebug.config | toYaml }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_helps.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_helps.tpl new file mode 100644 index 00000000..c34c2e28 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_helps.tpl @@ -0,0 +1,238 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "appdynamics-otel-collector.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name. + We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). + If release name contains chart name it will be used as a full name. +*/}} +{{- define "appdynamics-otel-collector.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* + Create a default fully qualified app name. + We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). + If release name contains chart name it will be used as a full name. +*/}} +{{- define "appdynamics-otel-collector.deployFullname" -}} +{{- if .var1.Values.fullnameOverride }} +{{- printf "%s%s-%s" .var1.Values.fullnameOverride .os .type | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .var1.Chart.Name .var1.Values.nameOverride }} +{{- if contains $name .var1.Release.Name }} +{{- printf "%s%s-%s" .var1.Release.Name .os .type | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s%s-%s-%s" .var1.Release.Name .os .type $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* + Create a default fully qualified app name for linux daemonset gateway. + */}} +{{- define "appdynamics-otel-collector.daemonset.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "" "type" "ds") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name for daemonset gateway. 
+ */}} +{{- define "appdynamics-otel-collector.daemonset.windows.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "-win" "type" "ds") | trunc 42 | trimSuffix "-" }} +{{- end }} + + +{{/* + Create a default fully qualified app name for linux statefulset gateway. + */}} +{{- define "appdynamics-otel-collector.statefulset.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "" "type" "ss") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name for statefulset gateway. + */}} +{{- define "appdynamics-otel-collector.statefulset.windows.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "-win" "type" "ss") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name for linux sampler. +*/}} +{{- define "appdynamics-otel-collector.tailsampler.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "" "type" "ts") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name for windows sampler. +*/}} +{{- define "appdynamics-otel-collector.tailsampler.windows.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "-win" "type" "ts") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name for linux sampler. + */}} +{{- define "appdynamics-otel-collector.sidecar.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . "os" "" "type" "sc") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* + Create a default fully qualified app name for windows sampler. + */}} +{{- define "appdynamics-otel-collector.sidecar.windows.fullname" -}} +{{ include "appdynamics-otel-collector.deployFullname" (dict "var1" . 
"os" "-win" "type" "sc") | trunc 42 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "appdynamics-otel-collector.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +Open telemetry operator assigns recommended labels like "app.kubernetes.io/instance" automatically, to avoid conflict, +we change to to use app.appdynamics.otel.collector. +*/}} +{{- define "appdynamics-otel-collector.labels" -}} +helm.sh/chart: {{ include "appdynamics-otel-collector.chart" . }} +{{- if .Chart.AppVersion }} +app.appdynamics.otel.collector/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.appdynamics.otel.collector/managed-by: Helm +{{- end }} + +{{/* +Selector labels for all +*/}} +{{- define "appdynamics-otel-collector.selectorLabels" -}} +app.appdynamics.otel.collector/name: {{ include "appdynamics-otel-collector.name" . }} +app.appdynamics.otel.collector/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Selector labels for gateway +*/}} +{{- define "appdynamics-otel-collector.gateway.selectorLabels" -}} +{{- include "appdynamics-otel-collector.selectorLabels" . }} +{{- $deploy_mode := split "_" .Values.presets.tailsampler.deploy_mode }} +{{- if and .Values.presets.tailsampler.enable (eq $deploy_mode._1 "gateway")}} +app.appdynamics.otel.collector/tailsampler: "true" +{{- end }} +app.appdynamics.otel.collector/gateway: "true" +{{- end }} + +{{/* +Selector labels for daemonset +*/}} +{{- define "appdynamics-otel-collector.selectorLabelsDaemonset" -}} +{{- include "appdynamics-otel-collector.gateway.selectorLabels" . }} +app.appdynamics.otel.collector/mode: "daemonset" +{{- end }} + +{{/* +Selector labels for statefulset +*/}} +{{- define "appdynamics-otel-collector.selectorLabelsStatefulset" -}} +{{- include "appdynamics-otel-collector.gateway.selectorLabels" . 
}} +app.appdynamics.otel.collector/mode: "statefulset" +{{- end }} + +{{/* +Selector labels for sampler +*/}} +{{- define "appdynamics-otel-collector.tailsampler.selectorLabels" -}} +{{- include "appdynamics-otel-collector.selectorLabels" . }} +app.appdynamics.otel.collector/tailsampler: "true" +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "appdynamics-otel-collector.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "appdynamics-otel-collector.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create the name of the service account for target allocator to use +*/}} +{{- define "appdynamics-otel-collector.targetAllocatorServiceAccountName" -}} +{{- if .Values.targetAllocatorServiceAccount.create }} +{{- default (printf "%s%s" (include "appdynamics-otel-collector.fullname" .) "-target-allocator") .Values.targetAllocatorServiceAccount.name }} +{{- else }} +{{- default "default" .Values.targetAllocatorServiceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Merge labels from user inputs +*/}} +{{- define "appdynamics-otel-collector.finalLabelsDaemonset" -}} +{{- $labels := fromYaml (include "appdynamics-otel-collector.labels" .) -}} +{{- $labels := (include "appdynamics-otel-collector.selectorLabelsDaemonset" .) | fromYaml | mustMergeOverwrite $labels }} +{{- $labels := mustMergeOverwrite .Values.labels $labels -}} +{{ toYaml $labels }} +{{- end }} + +{{/* +Merge labels from user inputs +*/}} +{{- define "appdynamics-otel-collector.finalLabelsStatefulset" -}} +{{- $labels := fromYaml (include "appdynamics-otel-collector.labels" .) -}} +{{- $labels := (include "appdynamics-otel-collector.selectorLabelsStatefulset" .) 
| fromYaml | mustMergeOverwrite $labels }} +{{- $labels := mustMergeOverwrite .Values.labels $labels -}} +{{ toYaml $labels }} +{{- end }} + +{{/* + Merge labels from user inputs + */}} +{{- define "appdynamics-otel-collector.tailsampler.finalLabels" -}} +{{- $labels := fromYaml (include "appdynamics-otel-collector.labels" .) -}} +{{- $labels := (include "appdynamics-otel-collector.tailsampler.selectorLabels" .) | fromYaml | mustMergeOverwrite $labels }} +{{- $labels := mustMergeOverwrite $labels .Values.labels -}} +{{ toYaml $labels }} +{{- end }} + +{{/* +Generate agent management k8s deployment naming config +example input - (dict "var1" . "os" "win" "type" "ss") +*/}} +{{- define "appdynamics-otel-collector.agentManagementNameConfig" -}} +{{- if or .var1.Values.agentManagement .var1.Values.agentManagementSelfTelemetry }} +extensions: + appdagentmanagementextension: + deployment: + name: {{.name}}-collector +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.agentManagementModeConfig" -}} +{{- if or .var1.Values.agentManagement .var1.Values.agentManagementSelfTelemetry }} +extensions: + appdagentmanagementextension: + deployment: + type: {{.mode}} +{{- end }} +{{- end }} + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_release-specific.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_release-specific.tpl new file mode 100644 index 00000000..ff0ccced --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_release-specific.tpl @@ -0,0 +1,7 @@ +{{- define "appdynamics-otel-collector.readClusterId" }} +{{- if (lookup "v1" "Namespace" "" "kube-system").metadata -}} +{{ required "Could not fetch kube-system uid to populate clusterID! 
" (lookup "v1" "Namespace" "" "kube-system").metadata.uid }} +{{- else -}} +{{ .Values.global.clusterId | required "clusterId needs to be specified when kube-system metadata is not accessible!" }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-daemonset.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-daemonset.tpl new file mode 100644 index 00000000..4df59a3b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-daemonset.tpl @@ -0,0 +1,188 @@ +{{- define "appdynamics-otel-collector-daemonset.filelog-receiver.basedefination" -}} +{{- if .var1.Values.enableFileLog }} +receivers: + filelog: + poll_interval: 10ms + include: {{ .var1.Values.filelogReceiverConfig.includeLogsPath }} + exclude: {{ .var1.Values.filelogReceiverConfig.excludeLogsPath }} + start_at: beginning + include_file_path: true + include_file_name: false + operators: + - type: router + id: get-format + routes: + - output: parser-docker + expr: 'body matches "^\\{"' + - type: json_parser + id: parser-docker + output: extract_metadata_from_filepath + timestamp: + parse_from: attributes.time + layout: '%Y-%m-%dT%H:%M:%S.%LZ' + - type: move + from: attributes.log + to: body + - type: regex_parser + id: extract_metadata_from_filepath + regex: '^.*\/(?P[^_]+)_(?P[^_]+)_(?P[a-f0-9\-]{36})\/(?P[^\._]+)\/(?P\d+)\.log$' + parse_from: attributes["log.file.path"] + - type: move + from: attributes.stream + to: attributes["log.iostream"] + - type: move + from: attributes.container_name + to: resource["k8s.container.name"] + - type: move + from: attributes.namespace + to: resource["k8s.namespace.name"] + - type: move + from: attributes.pod_name + to: resource["k8s.pod.name"] + - type: move + from: attributes.restart_count + 
to: resource["k8s.container.restart_count"] + - type: move + from: attributes.uid + to: resource["k8s.pod.uid"] +processors: + resource/filelog: + attributes: + - key: telemetry.sdk.name + action: upsert + value: "log-agent" + - key: k8s.cluster.id + action: upsert + value: {{ (include "appdynamics-otel-collector.clusterId" .var1) | quote}} + - key: source.name + action: upsert + value: "log-agent" + - key: k8s.cluster.name + action: upsert + value: {{ (include "appdynamics-otel-collector.cluster.name" .var1) | quote}} + - key: _message_parser.pattern + action: upsert + value: {{ .var1.Values.filelogReceiverConfig.messageParserPattern | quote }} + - key: _message_parser.type + action: upsert + value: {{ .var1.Values.filelogReceiverConfig.messageParserType | quote }} + - key: log_sender + action: upsert + value: "AppD_filelog_recevier" + - key: host.name + action: delete + - key: cloud.provider + action: delete + - key: cloud.platform + action: delete + - key: cloud.region + action: delete + - key: cloud.account.id + action: delete + - key: cloud.availability_zone + action: delete + - key: host.image.id + action: delete + - key: host.type + action: delete + - key: host.name + action: delete + resourcedetection: + detectors: ["ec2", "system"] + system: + hostname_sources: ["os", "cname", "lookup", "dns"] + transform/filelog: + log_statements: + - context: resource + statements: + - set(attributes["internal.container.encapsulating_object_id"],Concat([attributes["k8s.cluster.id"],attributes["k8s.pod.uid"]],":")) + k8sattributes/filelog: + auth_type: "serviceAccount" + passthrough: false + {{- if .Values.nodeLocalTrafficMode }} + filter: + node_from_env_var: NODE_NAME + {{- end }} + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.namespace.name + - k8s.node.name + - k8s.pod.start_time + - container.id + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.uid +service: + pipelines: + logs/filelog: + receivers: 
[filelog] + processors: ["memory_limiter","k8sattributes/filelog", "resourcedetection", "resource/filelog","transform/filelog"] + exporters: [logging, otlphttp] +{{- end }} +{{- end }} + +{{/* + daemonset config + var1 - global scope + var2 - computed spec +*/}} +{{- define "appdynamics-otel-collector-daemonset.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector.autoValueConfig" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector-daemonset.filelog-receiver.basedefination" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementModeConfig" (dict "mode" "daemonset" "var1" .var1) | fromYaml ) }} +{{- if .var1.Values.mode.daemonset.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .var1.Values.mode.daemonset.configOverride)}} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{- define "appdynamics-otel-collector-daemonset.valueVarLogVolumeMounts" -}} +name: {{.name}} +mountPath: {{.mountPath}} +{{- end }} +{{- define "appdynamics-otel-collector-daemonset.valueVarLogVolume" -}} +name: {{.name}} +hostPath: + path: {{.path}} +{{- end }} + +{{- define "appdynamics-otel-collector-daemonset.valueVarLogVolumeSpec" -}} +{{- if .Values.enableFileLog }} +podSecurityContext: + runAsUser: 10001 + runAsGroup: 0 +{{- $specVolumeMounts := get .spec "volumeMounts" | deepCopy }} +{{- if not $specVolumeMounts }} +{{- $specVolumeMounts = list }} +{{- end }} +{{- $specVolumeMounts = append $specVolumeMounts (include "appdynamics-otel-collector-daemonset.valueVarLogVolumeMounts" (dict "name" "varlog" "mountPath" "/var/log") | fromYaml)}} +{{- $specVolumeMounts = append $specVolumeMounts (include "appdynamics-otel-collector-daemonset.valueVarLogVolumeMounts" (dict "name" "varlibdockercontainers" "mountPath" " /var/lib/docker/containers") | fromYaml)}} +volumeMounts: +{{- $specVolumeMounts | toYaml | 
nindent 2}} + +{{- $specVolume := get .spec "volumes" | deepCopy }} +{{- if not $specVolume }} +{{- $specVolume = list }} +{{- end }} +{{- $specVolume = append $specVolume (include "appdynamics-otel-collector-daemonset.valueVarLogVolume" (dict "name" "varlog" "path" "/var/log") | fromYaml)}} +{{- $specVolume = append $specVolume (include "appdynamics-otel-collector-daemonset.valueVarLogVolume" (dict "name" "varlibdockercontainers" "path" "/var/lib/docker/containers") | fromYaml)}} +volumes: +{{- $specVolume | toYaml | nindent 2}} +{{- end }} +{{- end }} + +{{/* + daemonset spec +*/}} +{{- define "appdynamics-otel-collector-daemonset.spec" -}} +{{- $spec := (include "appdynamics-otel-collector.spec" . | fromYaml) }} +{{- $spec := include "appdynamics-otel-collector-daemonset.valueVarLogVolumeSpec" (dict "Values" .Values "spec" $spec) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector-daemonset.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := .Values.mode.daemonset.spec | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-linux.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-linux.tpl new file mode 100644 index 00000000..074a9b83 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-linux.tpl @@ -0,0 +1,69 @@ +{{/* + daemonset linux config + var1 - global scope + var2 - computed spec +*/}} +{{- define "appdynamics-otel-collector-daemonset-linux.autoValueConfig" -}} +{{- $mergedConfig := (include 
"appdynamics-otel-collector-daemonset.autoValueConfig" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementNameConfig" (dict "name" (include "appdynamics-otel-collector.daemonset.fullname" .var1) "var1" .var1) | fromYaml ) }} +{{- with .var1.Values.env.linux.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- with ((.var1.Values.env.linux.mode).daemonset).configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + + +{{/* + daemonset linux spec +*/}} +{{- define "appdynamics-otel-collector-daemonset-linux.spec" -}} +{{- $spec := (include "appdynamics-otel-collector-daemonset.spec" . | fromYaml) }} +{{- if .Values.env.linux.spec -}} +{{- $spec := .Values.env.linux.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- with ((.Values.env.linux.mode).daemonset).spec }} +{{- $spec := . | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- $spec := include "appdynamics-otel-collector.appendGoMemLimitEnv" (dict "spec" $spec "Values" .Values) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector-daemonset-linux.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} + + +{{/* + statefulset linux config + var1 - global scope + var2 - computed spec +*/}} +{{- define "appdynamics-otel-collector-statefulset-linux.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector-statefulset.autoValueConfig" . 
| fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementNameConfig" (dict "name" (include "appdynamics-otel-collector.statefulset.fullname" .var1) "var1" .var1) | fromYaml ) }} +{{- with .var1.Values.env.linux.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- with ((.var1.Values.env.linux.mode).statefulset).configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{/* + statefulset linux spec +*/}} +{{- define "appdynamics-otel-collector-statefulset-linux.spec" -}} +{{- $spec := (include "appdynamics-otel-collector-statefulset.spec" . | fromYaml) }} +{{- $config := include "appdynamics-otel-collector-statefulset-linux.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- if .Values.env.linux.spec -}} +{{- $spec := .Values.env.linux.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- with ((.Values.env.linux.mode).statefulset).spec }} +{{- $spec := . 
| deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- $spec := include "appdynamics-otel-collector.appendGoMemLimitEnv" (dict "spec" $spec "Values" .Values) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-sidecar.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-sidecar.tpl new file mode 100644 index 00000000..91103853 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-sidecar.tpl @@ -0,0 +1,74 @@ +{{- define "appdynamics-otel-collector.sidecar.clientSideBalancing" -}} +{{- if .Values.presets.multi_tier.sidecar.client_side_loadbalancing }} +exporters: + otlp: + endpoint: dns:///appdynamics-otel-collector-service-headless.{{ include "appdynamics-otel-collector.namespace" .}}.svc.cluster.local:4317 + balancer_name: round_robin + tls: + insecure: true +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.sidecar.selfTelemetryOverride" -}} +{{- if .Values.selfTelemetry }} +processors: + batch/self: + send_batch_size: 100 + timeout: 1s +service: + pipelines: + metrics/self: + exporters: [otlp] +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.sidecar.autoValueConfig" -}} +{{- $mergedConfig := tpl (get .var1.Values.presets.multi_tier.sidecar "config" | deepCopy | toYaml) .var1 | fromYaml}} +{{- if eq .var1.Values.presets.presampler.deploy_mode "sidecar"}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.presampler" .var1 | fromYaml )}} +{{- end }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.samplerdebug" .var1 | fromYaml )}} +{{- $deploy_mode := split "_" 
.var1.Values.presets.tailsampler.deploy_mode }} +{{- if and .var1.Values.presets.tailsampler.enable (eq $deploy_mode._0 "sidecar")}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.loadbalancing" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfig.loadbalancing" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfigFromSecrets.loadbalancing" .var1 | fromYaml )}} +{{- end }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.memoryLimiter" .var2.resources.limits | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.selfTelemetry" (dict "var1" .var1 "var2" "appd-otel-col-sidecar") | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.sidecar.selfTelemetryOverride" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.traceContextPropagation" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.chartInfo" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.sidecar.clientSideBalancing" .var1 | fromYaml ) }} +{{- $mergedConfig := tpl ($mergedConfig | toYaml) .var1 | fromYaml }} +{{- if .var1.Values.presets.multi_tier.sidecar.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .var1.Values.presets.multi_tier.sidecar.configOverride)}} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{- define "appdynamics-otel-collector.sidecar.spec" -}} +{{- $spec := .Values.presets.multi_tier.sidecar.spec | deepCopy }} +{{- $spec := include "appdynamics-otel-collector.valueTLSVolume" . 
| fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.valueServiceAccount" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector.sidecar.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.selfTelemetry.spec" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} + +{{- define "appdynamics-otel-collector.sidecar.linux.spec" -}} +{{- $spec := (include "appdynamics-otel-collector.sidecar.spec" . | fromYaml) }} +{{- if .Values.presets.multi_tier.sidecar.env.linux.spec -}} +{{- $spec := .Values.presets.multi_tier.sidecar.env.linux.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- toYaml $spec }} +{{- end }} + +{{- define "appdynamics-otel-collector.sidecar.windows.spec" -}} +{{- $spec := (include "appdynamics-otel-collector.sidecar.spec" . 
| fromYaml) }} +{{- if .Values.presets.multi_tier.sidecar.env.windows.spec -}} +{{- $spec := .Values.presets.multi_tier.sidecar.env.windows.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- toYaml $spec }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-statefulset.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-statefulset.tpl new file mode 100644 index 00000000..cd8d5840 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-statefulset.tpl @@ -0,0 +1,140 @@ +{{/* + Prometheus config +*/}} +{{- define "appdynamics-otel-collector-statefulset.prometheusConfig" -}} +{{- if .Values.enablePrometheus }} +receivers: + prometheus: + config: + scrape_configs: + - job_name: 'prometheus-exporter-endpoints' + scrape_interval: 60s + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_appdynamics_com_exporter_type] + action: keep + regex: (redis|kafka|kafkajmx) + replacement: $$1 + - source_labels: [__meta_kubernetes_endpoint_ready] + action: keep + regex: (.+) + replacement: $$1 + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + replacement: $$1 + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $$1:$$2 + - source_labels: [__meta_kubernetes_service_annotation_appdynamics_com_exporter_type] + action: replace + target_label: appdynamics_exporter_type + replacement: $$1 + - source_labels: [__meta_kubernetes_service_annotation_appdynamics_com_kafka_cluster_name] + action: replace + target_label: 
kafka_cluster_name + replacement: $$1 + + metric_relabel_configs: + - source_labels: [__name__] + regex: "kafka_(.+)|java_(.+)|redis_blocked_clients|redis_commands_duration_seconds_total|redis_commands_processed_total|redis_commands_total|redis_config_maxclients|redis_connected_clients|redis_connected_slave(.+)|redis_connections_received_total|redis_cpu_sys_children_seconds_total|redis_cpu_sys_seconds_total|redis_cpu_user_children_seconds_total|redis_cpu_user_seconds_total|redis_db_(.+)|redis_(.+)_keys_total|redis_instance_info|redis_keyspace_(.+)|redis_master_last_io_seconds_ago|redis_master_link_up|redis_master_sync_in_progress|redis_mem_fragmentation_ratio|redis_memory_max_bytes|redis_memory_used_bytes|redis_memory_used_dataset_bytes|redis_memory_used_lua_bytes|redis_memory_used_overhead_bytes|redis_memory_used_scripts_bytes|redis_net_(.+)|redis_pubsub_(.+)|redis_rdb_changes_since_last_save|redis_rdb_last_save_timestamp_seconds|redis_rejected_connections_total|redis_slave_info|redis_slowlog_length|redis_up(.*)" + action: keep + - source_labels: [__name__] + regex: "kafka_exporter_build_info|kafka_consumergroup_current_offset|kafka_consumergroup_lag|kafka_topic_partition_current_offset|kafka_topic_partition_in_sync_replica|kafka_topic_partition_leader|kafka_topic_partition_leader_is_preferred|kafka_topic_partition_oldest_offset" + action: drop + +processors: + groupbyattrs/prometheus: + keys: + - appdynamics_exporter_type + resource/prometheus: + attributes: + - key: telemetry.sdk.name + value: "prometheus" + action: upsert + - key: prometheus.exporter_type + from_attribute: appdynamics_exporter_type + action: upsert + - key: appdynamics_exporter_type + action: delete + - key: k8s.cluster.name + value: {{ (include "appdynamics-otel-collector.cluster.name" .) | quote}} + action: upsert + - key: k8s.cluster.id + value: {{ (include "appdynamics-otel-collector.clusterId" .) 
| quote}} + action: upsert + metricstransform/prometheus: + transforms: + - include: kafka_log_log_size + match_type: strict + action: update + operations: + - action: aggregate_labels + label_set: [ kafka_cluster_name,topic ] + aggregation_type: sum + - include: kafka_topic_partition_replicas + match_type: strict + action: update + operations: + - action: aggregate_labels + label_set: [ kafka_cluster_name,topic ] + aggregation_type: sum + - include: kafka_topic_partition_under_replicated_partition + match_type: strict + action: update + operations: + - action: aggregate_labels + label_set: [ kafka_cluster_name,topic ] + aggregation_type: sum + +service: + pipelines: + metrics/prometheus: + receivers: [ prometheus ] + processors: [ memory_limiter, groupbyattrs/prometheus, resource/prometheus, metricstransform/prometheus, batch ] + exporters: [ otlphttp ] +{{- end }} +{{- end }} + +{{/* + Spec changes for Prometheus config +*/}} +{{- define "appdynamics-otel-collector-statefulset.prometheusSpec" -}} +{{- if .Values.enablePrometheus -}} +targetAllocator: + enabled: true + serviceAccount: {{ include "appdynamics-otel-collector.valueTargetAllocatorServiceAccount" . }} +{{- end }} +{{- end }} + +{{/* + statefulset config + var1 - global scope + var2 - computed spec +*/}} +{{- define "appdynamics-otel-collector-statefulset.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector.autoValueConfig" . 
| fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector-statefulset.prometheusConfig" .var1 | fromYaml) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementModeConfig" (dict "mode" "statefulset" "var1" .var1) | fromYaml) }} +{{- if .var1.Values.mode.statefulset.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .var1.Values.mode.statefulset.configOverride) }} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{/* + statefulset spec +*/}} +{{- define "appdynamics-otel-collector-statefulset.spec" -}} +{{- $spec := (include "appdynamics-otel-collector.spec" . | fromYaml) }} +{{- $spec := include "appdynamics-otel-collector-statefulset.prometheusSpec" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector-statefulset.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := .Values.mode.statefulset.spec | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-tailsampler.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-tailsampler.tpl new file mode 100644 index 00000000..d5b54173 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-tailsampler.tpl @@ -0,0 +1,86 @@ +{{- define "appdynamics-otel-collector.tailsampler.autoValueConfig" -}} +{{- $otelConfig := tpl (get .var1.Values.presets.multi_tier.tailsampler "config" | deepCopy | toYaml) .var1 | fromYaml}} +{{- $mergedConfig := 
mustMergeOverwrite $otelConfig (include "appdynamics-otel-collector.derivedConfig" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.samplerdebug" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.sampler" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfig.tracegrouping" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfigFromSecrets.tracegrouping" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.memoryLimiter" .var2.resources.limits | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.selfTelemetry" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagement" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementSelfTelemetry" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.traceContextPropagation" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.chartInfo" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementModeConfig" (dict "mode" "deployment" "var1" .var1) | fromYaml ) }} +{{- $mergedConfig := tpl ($mergedConfig | toYaml) .var1 | fromYaml }} +{{- if .var1.Values.presets.multi_tier.tailsampler.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .var1.Values.presets.multi_tier.tailsampler.configOverride)}} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{/* + Set 
service.ports into spec.ports in the value file. + If the spec.ports is already set, the service.ports section won't take any effect. +*/}} +{{- define "appdynamics-otel-collector.tailsampler.valueServicePorts" -}} +{{- if not .Values.presets.multi_tier.tailsampler.spec.ports }} +ports: +{{- .Values.presets.tailsampler.service.ports | toYaml | nindent 2}} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.tailsampler.linux.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector.tailsampler.autoValueConfig" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementNameConfig" (dict "name" (include "appdynamics-otel-collector.tailsampler.fullname" .var1) "var1" .var1) | fromYaml ) }} +{{- with .var1.Values.presets.multi_tier.tailsampler.env.linux.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{- define "appdynamics-otel-collector.tailsampler.replicas" -}} +replicas: {{ .Values.presets.tailsampler.replicas }} +{{- end }} + +{{- define "appdynamics-otel-collector.tailsampler.spec" -}} +{{- $spec := .Values.presets.multi_tier.tailsampler.spec | deepCopy }} +{{- $spec := include "appdynamics-otel-collector.tailsampler.replicas" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.appendEnv" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.valuesVolume" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.tailsampler.valueServicePorts" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.valueServiceAccount" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.selfTelemetry.spec" . 
| fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} + +{{- define "appdynamics-otel-collector.tailsampler.linux.spec" -}} +{{- $spec := (include "appdynamics-otel-collector.tailsampler.spec" . | fromYaml) }} +{{- if .Values.presets.multi_tier.tailsampler.env.linux.spec -}} +{{- $spec := .Values.presets.multi_tier.tailsampler.env.linux.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- $spec := include "appdynamics-otel-collector.appendGoMemLimitEnv" (dict "spec" $spec "Values" .Values) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector.tailsampler.linux.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} + +{{- define "appdynamics-otel-collector.tailsampler.windows.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector.tailsampler.autoValueConfig" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementNameConfig" (dict "name" (include "appdynamics-otel-collector.tailsampler.windows.fullname" .var1) "var1" .var1) | fromYaml ) }} +{{- with .var1.Values.presets.multi_tier.tailsampler.env.windows.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{- define "appdynamics-otel-collector.tailsampler.windows.spec" -}} +{{- $spec := (include "appdynamics-otel-collector.tailsampler.spec" . 
| fromYaml) }} +{{- if .Values.presets.multi_tier.tailsampler.env.windows.spec -}} +{{- $spec := .Values.presets.multi_tier.tailsampler.env.windows.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- $spec := include "appdynamics-otel-collector.appendGoMemLimitEnv" (dict "spec" $spec "Values" .Values) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector.tailsampler.windows.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-windows.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-windows.tpl new file mode 100644 index 00000000..3e7fb0f8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec-windows.tpl @@ -0,0 +1,69 @@ +{{/* + daemonset windows config + @param .var1 global scope + @param .var2 computed spec +*/}} +{{- define "appdynamics-otel-collector-daemonset-windows.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector-daemonset.autoValueConfig" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementNameConfig" (dict "name" (include "appdynamics-otel-collector.daemonset.windows.fullname" .var1) "var1" .var1) | fromYaml ) }} +{{- with .var1.Values.env.windows.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- with ((.var1.Values.env.windows.mode).daemonset).configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) 
}} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + + +{{/* + daemonset windows spec +*/}} +{{- define "appdynamics-otel-collector-daemonset-windows.spec" -}} +{{- $spec := include "appdynamics-otel-collector-daemonset.spec" . | fromYaml }} +{{- if .Values.env.windows.spec -}} +{{- $spec := .Values.env.windows.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- with ((.Values.env.windows.mode).daemonset).spec }} +{{- $spec := . | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- $spec := include "appdynamics-otel-collector.appendGoMemLimitEnv" (dict "spec" $spec "Values" .Values) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := (include "appdynamics-otel-collector-daemonset-windows.autoValueConfig" (dict "var1" . "var2" $spec)) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} + + +{{/* + statefulset windows config + @param .var1 global scope + @param .var2 computed spec +*/}} +{{- define "appdynamics-otel-collector-statefulset-windows.autoValueConfig" -}} +{{- $mergedConfig := (include "appdynamics-otel-collector-statefulset.autoValueConfig" . | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementNameConfig" (dict "name" (include "appdynamics-otel-collector.statefulset.windows.fullname" .var1) "var1" .var1) | fromYaml ) }} +{{- with .var1.Values.env.windows.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) }} +{{- end }} +{{- with ((.var1.Values.env.windows.mode).statefulset).configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .) 
}} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + +{{/* + statefulset windows spec +*/}} +{{- define "appdynamics-otel-collector-statefulset-windows.spec" -}} +{{- $spec := (include "appdynamics-otel-collector-statefulset.spec" . | fromYaml) }} +{{- $config := include "appdynamics-otel-collector-statefulset-windows.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- if .Values.env.windows.spec -}} +{{- $spec := .Values.env.windows.spec | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- with ((.Values.env.windows.mode).statefulset).spec }} +{{- $spec := . | deepCopy | mustMergeOverwrite $spec }} +{{- end }} +{{- $spec := include "appdynamics-otel-collector.appendGoMemLimitEnv" (dict "spec" $spec "Values" .Values) | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec.tpl new file mode 100644 index 00000000..b581a256 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/_spec.tpl @@ -0,0 +1,682 @@ +{{/* + Derived configuation from top level properties +*/}} +{{/* + downward api environment variable + https://kubernetes.io/docs/concepts/workloads/pods/downward-api/ + params + envName - environment variable name + path - field path +*/}} +{{- define "appdynamics-otel-collector.downwardEnvVar" -}} +name: {{.envName}} +valueFrom: + fieldRef: + fieldPath: {{.path}} +{{- end }} + +{{- define "appdynamics-otel-collector.derivedConfig" -}} +extensions: + oauth2client: +{{- (include "appdynamics-otel-collector.derivedOAuth" 
.) | nindent 4}} +exporters: + otlphttp: + metrics_endpoint: {{ include "appdynamics-otel-collector.endpoint" . }}/v1/metrics + traces_endpoint: {{ include "appdynamics-otel-collector.endpoint" . }}/v1/trace + logs_endpoint: {{ include "appdynamics-otel-collector.endpoint" . }}/v1/logs + retry_on_failure: + max_elapsed_time: 180s +{{- end }} + +{{- define "appdynamics-otel-collector.tlsConfig" -}} +{{- if .Values.global.tls.otelReceiver.settings }} +receivers: + otlp: + protocols: + grpc: + tls: +{{- deepCopy .Values.global.tls.otelReceiver.settings | toYaml | nindent 10}} + http: + tls: +{{- deepCopy .Values.global.tls.otelReceiver.settings | toYaml | nindent 10}} + otlp/lca: + protocols: + grpc: + tls: +{{- deepCopy .Values.global.tls.otelReceiver.settings | toYaml | nindent 10}} + http: + tls: +{{- deepCopy .Values.global.tls.otelReceiver.settings | toYaml | nindent 10}} +{{- end }} +{{- if .Values.global.tls.otelExporter.settings }} +extensions: + oauth2client: + tls: +{{- deepCopy .Values.global.tls.otelExporter.settings | toYaml | nindent 6}} +exporters: + otlphttp: + tls: +{{- deepCopy .Values.global.tls.otelExporter.settings | toYaml | nindent 6}} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.traceContextPropagation" -}} +{{- if .Values.traceContextPropagation }} +service: + telemetry: + traces: + propagators: + - tracecontext +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.selfTelemetry" -}} +{{- if .Values.selfTelemetry }} +receivers: + prometheus/self: + config: + scrape_configs: + - job_name: {{ .Values.selfTelemetryServiceName | default "appd-otel-collector" | quote }} + scrape_interval: 60s + static_configs: + - targets: ["localhost:8888"] +exporters: + logging: + verbosity: detailed +processors: + batch/self: + transform/self: + metric_statements: + - context: resource + statements: + - set(attributes["prometheus.targets"],attributes["service.instance.id"]) where attributes["prometheus.targets"] == nil + - 
set(attributes["service.namespace"], "otelcol") + - set(attributes["otel.collector.description"], "AppDynamics Distribution of OpenTelemetry collector.") + - set(attributes["service.version"],attributes["service_version"]) + - set(attributes["telemetry.sdk.name"],"opentelemetry") + - set(attributes["k8s.pod.uid"], "${POD_UID}" ) + - set(attributes["k8s.cluster.name"], "test-cluster") + - context: datapoint + statements: + - set(resource.attributes["service.instance.id"],attributes["service_instance_id"]) + {{- if .Values.setPodUID }} + - set(resource.attributes["k8s.pod.uid"], "${POD_UID}" ) + {{- end }} + - set(resource.attributes["k8s.cluster.name"], {{ include "appdynamics-otel-collector.cluster.name" . | quote }}) + - set(resource.attributes["k8s.cluster.id"], {{ include "appdynamics-otel-collector.clusterId" . | quote}}) +service: + pipelines: + metrics/self: + receivers: [prometheus/self] + processors: [ memory_limiter, transform/self, batch/self] + exporters: [otlphttp] +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.agentManagement" -}} +{{- if or .Values.agentManagement .Values.agentManagementSelfTelemetry }} +extensions: + appdagentmanagementextension: + service_url: {{ include "appdynamics-otel-collector.serviceURL" . }} + agent_descriptor_type: "otel_collector" + agent_namespace: "otelcollector" + agent_name: {{.Release.Name}} + node_config: + node_name: "${NODE_NAME}" + disable_opamp: {{.Values.disableOpamp}} + oauth: + {{- (include "appdynamics-otel-collector.derivedOAuth" .) | nindent 6}} + tenant_id: {{ include "appdynamics-otel-collector.tenant.id" . }} + platform: + id: {{ (include "appdynamics-otel-collector.clusterId" .) | quote }} + name: {{ (include "appdynamics-otel-collector.cluster.name" .) 
| quote }} + type: k8s + deployment: + scope: {{ include "appdynamics-otel-collector.namespace" .}} + unit: "${POD_NAME}" + unique: false + helm_chart_version: {{ .Values.global.helmChartVersion }} + http_client_settings: + {{- (include "appdynamics-otel-collector.client.agent.proxy" .) | nindent 6}} +service: + extensions: [health_check, oauth2client, appdagentmanagementextension] +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.agentManagementSelfTelemetry" -}} +{{- if .Values.agentManagementSelfTelemetry }} +receivers: + prometheus/self: + config: + scrape_configs: + - job_name: {{ .Values.selfTelemetryServiceName | default "appd-otel-collector" | quote }} + scrape_interval: 60s + static_configs: + - targets: ["localhost:8888"] + +processors: + batch/self: + agentmanagementresource: + appd_agent_management_ext: appdagentmanagementextension + resource: + "service.name": "" + "service.instance.id": "" + "net.host.port": "" + "http.scheme": "" + "telemetry.sdk.name": opentelemetry + "service_name": "" + "service_instance_id": "" + "service_version": "" + +exporters: +{{- .Values.presets.selfTelemetry.exporters | toYaml | nindent 2}} + +service: + pipelines: + metrics/agent_management_self_telemetry: + receivers: [prometheus/self] + processors: [memory_limiter, agentmanagementresource, batch/self] +{{- $exporter_list := list }} +{{- range $k,$v:= $.Values.presets.selfTelemetry.exporters }} +{{- $exporter_list = append $exporter_list $k }} +{{- end }} + exporters: +{{- $exporter_list | toYaml | nindent 8}} + +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.networkMonitoring" -}} +receivers: + otlp/ebpf: + protocols: + grpc: + endpoint: 0.0.0.0:24317 + http: + endpoint: 0.0.0.0:24318 +processors: + resource/ebpf: + attributes: + - key: telemetry.sdk.name + value: agent:otelnet_collector:collector + action: upsert + - key: netperf.platform.id + action: upsert + value: {{ include "appdynamics-otel-collector.clusterId" . 
| quote}} + - key: netperf.platform.type + action: upsert + value: k8s + filter/ebpf: + metrics: + metric: + - 'not (HasAttrOnDatapoint("source.resolution_type", "K8S_CONTAINER") or HasAttrOnDatapoint("dest.resolution_type", "K8S_CONTAINER"))' + metricstransform/ebpf: + transforms: + - include: tcp.bytes + action: insert + new_name: dummy.endpoint.bytes + operations: + - action: update_label + label: source.workload.name + new_label: source.endpoint.name + - include: udp.bytes + action: insert + new_name: dummy.endpoint.bytes + operations: + - action: update_label + label: source.workload.name + new_label: source.endpoint.name + attributes/ebpf: + actions: + - key: source.availability_zone + action: delete + - key: dest.availability_zone + action: delete + - key: az_equal + action: delete + - key: sf_product + action: delete + - key: source.environment + action: delete + - key: dest.environment + action: delete +service: + pipelines: + metrics/ebpf: + receivers: [otlp/ebpf] + processors: [memory_limiter, resource/ebpf, filter/ebpf, metricstransform/ebpf, attributes/ebpf, batch/metrics] + exporters: [otlphttp] +{{- end }} + +{{- define "appdynamics-otel-collector.chartInfo" -}} +{{- if .Values.sendChartInfo }} +exporters: + otlphttp: + headers: + appd-collector-helm-chart-version: "{{ tpl .Chart.Version . }}" + appd-collector-helm-chart-name: "{{ tpl .Chart.Name . }}" +{{- end }} +{{- end }} + +{{/* + Default memory limiter configuration for appdynamics-otel-collector based on k8s resource limits. +*/}} +{{- define "appdynamics-otel-collector.memoryLimiter" -}} +processors: + memory_limiter: +# check_interval is the time between measurements of memory usage. 
+ check_interval: 5s +# By default limit_mib is set to 80% of ".Values.spec.resources.limits.memory" + limit_mib: {{ include "appdynamics-otel-collector.getMemLimitMib" .memory }} +# By default spike_limit_mib is set to 25% of ".Values.spec.resources.limits.memory" + spike_limit_mib: {{ include "appdynamics-otel-collector.getMemSpikeLimitMib" .memory }} +{{- end }} + +{{- define "appdynamics-otel-collector.gomemlimit" -}} +name: GOMEMLIMIT +value: "{{ include "appdynamics-otel-collector.getMemLimitMib" .spec.resources.limits.memory }}MiB" +{{- end }} + +{{/* +Get memory_limiter limit_mib value based on 80% of resources.limits.memory. +*/}} +{{- define "appdynamics-otel-collector.getMemLimitMib" -}} +{{- div (mul (include "appdynamics-otel-collector.convertMemToMib" .) 80) 100 }} +{{- end -}} + +{{/* +Get memory_limiter spike_limit_mib value based on 25% of resources.limits.memory. +*/}} +{{- define "appdynamics-otel-collector.getMemSpikeLimitMib" -}} +{{- div (mul (include "appdynamics-otel-collector.convertMemToMib" .) 25) 100 }} +{{- end -}} + +{{/* +Convert memory value from resources.limit to numeric value in MiB to be used by otel memory_limiter processor. +*/}} +{{- define "appdynamics-otel-collector.convertMemToMib" -}} +{{- $mem := lower . 
-}} +{{- if hasSuffix "e" $mem -}} +{{- trimSuffix "e" $mem | atoi | mul 1000 | mul 1000 | mul 1000 | mul 1000 -}} +{{- else if hasSuffix "ei" $mem -}} +{{- trimSuffix "ei" $mem | atoi | mul 1024 | mul 1024 | mul 1024 | mul 1024 -}} +{{- else if hasSuffix "p" $mem -}} +{{- trimSuffix "p" $mem | atoi | mul 1000 | mul 1000 | mul 1000 -}} +{{- else if hasSuffix "pi" $mem -}} +{{- trimSuffix "pi" $mem | atoi | mul 1024 | mul 1024 | mul 1024 -}} +{{- else if hasSuffix "t" $mem -}} +{{- trimSuffix "t" $mem | atoi | mul 1000 | mul 1000 -}} +{{- else if hasSuffix "ti" $mem -}} +{{- trimSuffix "ti" $mem | atoi | mul 1024 | mul 1024 -}} +{{- else if hasSuffix "g" $mem -}} +{{- trimSuffix "g" $mem | atoi | mul 1000 -}} +{{- else if hasSuffix "gi" $mem -}} +{{- trimSuffix "gi" $mem | atoi | mul 1024 -}} +{{- else if hasSuffix "m" $mem -}} +{{- div (trimSuffix "m" $mem | atoi | mul 1000) 1024 -}} +{{- else if hasSuffix "mi" $mem -}} +{{- trimSuffix "mi" $mem | atoi -}} +{{- else if hasSuffix "k" $mem -}} +{{- div (trimSuffix "k" $mem | atoi) 1000 -}} +{{- else if hasSuffix "ki" $mem -}} +{{- div (trimSuffix "ki" $mem | atoi) 1024 -}} +{{- else -}} +{{- div (div ($mem | atoi) 1024) 1024 -}} +{{- end -}} +{{- end -}} + +{{- define "appdynamics-otel-collector.nonAppDTransformConfig" -}} +processors: + k8sattributes: + passthrough: false + {{- if .Values.nodeLocalTrafficMode }} + filter: + node_from_env_var: NODE_NAME + {{- end }} + transform/logs: + log_statements: + - context: resource + statements: + - set(attributes["k8s.cluster.id"], {{ (include "appdynamics-otel-collector.clusterId" .) 
| quote}}) + - set(attributes["internal.container.encapsulating_object_id"],Concat([attributes["k8s.cluster.id"],attributes["k8s.pod.uid"]],":")) + k8sattributes/logs: + passthrough: false + {{- if .Values.nodeLocalTrafficMode }} + filter: + node_from_env_var: NODE_NAME + {{- end }} + extract: + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.deployment.name + - k8s.namespace.name + - k8s.node.name + - k8s.pod.start_time + - container.id + - k8s.container.name + - container.image.name + - container.image.tag +{{- end}} + +{{/* + Append auto generated env to spec +*/}} +{{- define "appdynamics-otel-collector.appendEnv" -}} +{{- $spec := get .Values "spec" }} +{{- $specEnv := get $spec "env" | deepCopy }} +{{- if not $specEnv }} +{{- $specEnv = list }} +{{- end }} +{{- if.Values.setPodUID }} +{{- $specEnv = append $specEnv (include "appdynamics-otel-collector.downwardEnvVar" (dict "envName" "POD_UID" "path" "metadata.uid") | fromYaml ) }} +{{- end}} +{{- if .Values.clientSecretEnvVar -}} +{{- $specEnv = append $specEnv (include "appdynamics-otel-collector.clientSecretEnvVar" . | fromYaml ) }} +{{- end }} + +{{- $specEnv = append $specEnv (include "appdynamics-otel-collector.downwardEnvVar" (dict "envName" "NODE_NAME" "path" "spec.nodeName") | fromYaml)}} +env: +{{- $specEnv | toYaml | nindent 2}} +{{- end }} + +{{/* + The env var that may be different from gateway collector and sampler collector + variable + spec - the collector spec + Values - global Values +*/}} +{{- define "appdynamics-otel-collector.appendGoMemLimitEnv" -}} +{{- if .Values.useGOMEMLIMIT }} +{{- $specEnv := get .spec "env" | deepCopy }} +{{- $specEnv = append $specEnv (include "appdynamics-otel-collector.gomemlimit" . | fromYaml)}} +env: +{{- $specEnv | toYaml | nindent 2}} +{{- end }} +{{- end }} + +{{/* + Set serviceAccount.name into spec.serviceAccount in the value file. + If the spec.serviceAccount is already set, the serviceAccount.name won't take any effect. 
+ If neither spec.serviceAccount and serviceAccount.name are set, the default value will be populated to spec.serviceAccount. +*/}} +{{- define "appdynamics-otel-collector.valueServiceAccount" -}} +{{- if not .Values.spec.serviceAccount }} +serviceAccount: {{(.Values.serviceAccount.name | default (include "appdynamics-otel-collector.serviceAccountName" .))}} +{{- end }} +{{- end}} + +{{/* + Calculate the target allocator service account name. + When enable the prometheuse, the prority for finding target allocator service account is (from high to low) + - spec.targetAllocator.name + - targetAllocatorServiceServiceAccount.name + - default value - collector name concat with -target-allocator, e.g. "my-collector-target-allocator" +*/}} +{{- define "appdynamics-otel-collector.valueTargetAllocatorServiceAccount" -}} +{{- if .Values.spec.targetAllocator }} +{{- .Values.spec.targetAllocator.serviceAccount | default .Values.targetAllocatorServiceAccount.name | default (include "appdynamics-otel-collector.targetAllocatorServiceAccountName" .) }} +{{- else }} +{{- .Values.targetAllocatorServiceAccount.name | default (include "appdynamics-otel-collector.targetAllocatorServiceAccountName" .) 
-}} +{{- end }} +{{- end }} + + +{{- define "appdynamics-otel-collector.serverDefaultPaths" -}} +{{- if .secret }} +{{ $path := .path | default "/etc/otel/certs/receiver"}} +{{- if .secret.secretKeys.caCert}} +ca_file: {{$path}}/{{.secret.secretKeys.caCert}} +{{- if .mtlsEnabled}} +client_ca_file: {{$path}}/{{.secret.secretKeys.caCert}} +{{- end}} +{{- end}} +cert_file: {{$path}}/{{.secret.secretKeys.tlsCert}} +key_file: {{$path}}/{{.secret.secretKeys.tlsKey}} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.clientDefaultPaths" -}} +{{ $path := .path | default "/etc/otel/certs/exporter"}} +{{- if .secretKeys.caCert}} +ca_file: {{$path}}/{{.secretKeys.caCert}} +{{- end }} +cert_file: {{$path}}/{{.secretKeys.tlsCert}} +key_file: {{$path}}/{{.secretKeys.tlsKey}} +{{- end }} + +{{- define "appdynamics-otel-collector.clientSecretVolumePath" -}} +{{- $path := .path | default "/etc/otel/oauth/secret" -}} +${file:{{$path}}/{{.secretKey}}} +{{- end }} + +{{- define "appdynamics-otel-collector.tlsSecrets" -}} +secret: + secretName: {{.secretName}} + items: + {{- if .secretKeys.caCert}} + - key: {{.secretKeys.caCert}} + path: {{.secretKeys.caCert}} + {{- end }} + - key: {{required ".secretKeys.tlsCert is required" .secretKeys.tlsCert}} + path: {{.secretKeys.tlsCert}} + - key: {{required ".secretKeys.tlsKey is required" .secretKeys.tlsKey}} + path: {{.secretKeys.tlsKey}} +{{- end}} + +{{- define "appdynamics-otel-collector.clientSecret" -}} +secret: + secretName: {{.secretName}} + items: + - key: {{.secretKey}} + path: {{.secretKey}} +{{- end }} + +{{/* + calculate volume mounts for tls secret and client secret +*/}} +{{- define "appdynamics-otel-collector.valuesVolume" -}} +{{- if or .Values.clientSecretVolume (or .Values.global.tls.otelReceiver.secret .Values.global.tls.otelExporter.secret)}} +volumeMounts: +{{- with .Values.global.tls.otelReceiver.secret}} +{{ $path := .path | default "/etc/otel/certs/receiver"}} +- name: tlsotelreceiversecrets + mountPath: 
{{$path}} +{{- end}} +{{- with .Values.global.tls.otelExporter.secret}} +{{ $path := .path | default "/etc/otel/certs/exporter"}} +- name: tlsotelexportersecrets + mountPath: {{$path}} +{{- end}} +{{- with .Values.clientSecretVolume}} +{{ $path := .path | default "/etc/otel/oauth/secret"}} +- name: clientsecret + mountPath: {{$path}} +{{- end }} + +volumes: +{{- with .Values.global.tls.otelReceiver.secret}} +- name: tlsotelreceiversecrets +{{- (include "appdynamics-otel-collector.tlsSecrets" .) | nindent 2}} +{{- end }} +{{- with .Values.global.tls.otelExporter.secret}} +- name: tlsotelexportersecrets +{{- (include "appdynamics-otel-collector.tlsSecrets" .) | nindent 2}} +{{- end }} +{{- with .Values.clientSecretVolume}} +- name: clientsecret +{{- (include "appdynamics-otel-collector.clientSecret" .) | nindent 2}} +{{- end }} + +{{- end }} +{{- end }} + +{{/* + Generate tls cert paths from volume mounts dervied from secrets +*/}} +{{- define "appdynamics-otel-collector.tlsConfigFromSecrets" -}} +{{- with .Values.global.tls.otelReceiver}} +receivers: + otlp: + protocols: + grpc: + tls: +{{- (include "appdynamics-otel-collector.serverDefaultPaths" .) | nindent 10}} + http: + tls: +{{- (include "appdynamics-otel-collector.serverDefaultPaths" .) | nindent 10}} + otlp/lca: + protocols: + grpc: + tls: +{{- (include "appdynamics-otel-collector.serverDefaultPaths" .) | nindent 10}} + http: + tls: +{{- (include "appdynamics-otel-collector.serverDefaultPaths" .) | nindent 10}} +{{- end }} +{{- with .Values.global.tls.otelExporter.secret }} +extensions: + oauth2client: + tls: +{{- (include "appdynamics-otel-collector.clientDefaultPaths" .) | nindent 6}} +exporters: + otlphttp: + tls: +{{- (include "appdynamics-otel-collector.clientDefaultPaths" .) 
| nindent 6}} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.truncated" -}} +processors: + transform/truncate: + error_mode: ignore + trace_statements: + {{- if .Values.presets.truncated.trace.span }} + - context: span + statements: + - truncate_all(attributes, {{.Values.presets.truncated.trace.span}}) + {{- end }} + {{- if .Values.presets.truncated.trace.spanevent }} + - context: spanevent + statements: + - truncate_all(attributes, {{.Values.presets.truncated.trace.spanevent}}) + {{- end }} + {{- if .Values.presets.truncated.trace.resource }} + - context: resource + statements: + - truncate_all(attributes, {{.Values.presets.truncated.trace.resource}}) + {{- end }} + {{- if .Values.presets.truncated.trace.scope }} + - context: scope + statements: + - truncate_all(attributes, {{.Values.presets.truncated.trace.scope}}) + {{- end }} + metric_statements: + {{- if .Values.presets.truncated.metric.resource }} + - context: resource + statements: + - truncate_all(attributes, {{.Values.presets.truncated.metric.resource}}) + {{- end }} + {{- if .Values.presets.truncated.metric.scope }} + - context: scope + statements: + - truncate_all(attributes, {{.Values.presets.truncated.metric.scope}}) + {{- end }} + {{- if .Values.presets.truncated.metric.datapoint }} + - context: datapoint + statements: + - truncate_all(attributes, {{.Values.presets.truncated.metric.datapoint}}) + {{- end }} + log_statements: + {{- if .Values.presets.truncated.log.resource }} + - context: resource + statements: + - truncate_all(attributes, {{.Values.presets.truncated.log.resource}}) + {{- end }} + {{- if .Values.presets.truncated.log.scope }} + - context: scope + statements: + - truncate_all(attributes, {{.Values.presets.truncated.log.scope}}) + {{- end }} + {{- if .Values.presets.truncated.log.log }} + - context: log + statements: + - truncate_all(attributes, {{.Values.presets.truncated.log.log}}) + {{- end }} +{{- end }} + +{{/* + Auto generated otel collector configs. 
We need to compute memoryLimiter config from spec, thus we defined to template variable + var1 - global scope + var2 - computed spec +*/}} +{{- define "appdynamics-otel-collector.autoValueConfig" -}} +{{- $otelConfig := tpl (get .var1.Values "config" | deepCopy | toYaml) .var1 | fromYaml}} +{{- $mergedConfig := mustMergeOverwrite $otelConfig (include "appdynamics-otel-collector.derivedConfig" .var1 | fromYaml )}} +{{- if eq .var1.Values.presets.presampler.deploy_mode "gateway"}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.presampler" .var1 | fromYaml )}} +{{- end }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.samplerdebug" .var1 | fromYaml )}} + +{{- $deploy_mode := split "_" .var1.Values.presets.tailsampler.deploy_mode }} +{{- if and .var1.Values.presets.tailsampler.enable (eq $deploy_mode._0 "gateway")}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.loadbalancing" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfig.loadbalancing" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfigFromSecrets.loadbalancing" .var1 | fromYaml )}} +{{- end }} + +{{- if and .var1.Values.presets.tailsampler.enable (eq $deploy_mode._1 "gateway")}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.sampler" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfig.tracegrouping" .var1 | fromYaml )}} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tailsampler.tlsConfigFromSecrets.tracegrouping" .var1 | fromYaml )}} +{{- end }} + +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include 
"appdynamics-otel-collector.tlsConfigFromSecrets" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.tlsConfig" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.memoryLimiter" .var2.resources.limits | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.truncated" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.selfTelemetry" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagement" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.agentManagementSelfTelemetry" .var1 | fromYaml ) }} +{{- if .var1.Values.enableNetworkMonitoring }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.networkMonitoring" .var1 | fromYaml ) }} +{{- end }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.traceContextPropagation" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.chartInfo" .var1 | fromYaml ) }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (include "appdynamics-otel-collector.nonAppDTransformConfig" .var1 | fromYaml ) }} +{{- $mergedConfig := tpl ($mergedConfig | toYaml) .var1 | fromYaml }} +{{- if .var1.Values.configOverride }} +{{- $mergedConfig := mustMergeOverwrite $mergedConfig (deepCopy .var1.Values.configOverride)}} +{{- end }} +{{- toYaml $mergedConfig }} +{{- end }} + + +{{/* + convert config map to yaml multiline string +*/}} +{{- define "appdynamics-otel-collector.configToYamlString" -}} +config: |- +{{- . 
| toYaml | nindent 2 }} +{{- end }} + +{{- define "appdynamics-otel-collector.selfTelemetry.spec" -}} +#{{- if .Values.selfTelemetry }} +#args: +# "feature-gates": "telemetry.useOtelForInternalMetrics,telemetry.useOtelWithSDKConfigurationForInternalTelemetry" +{{- end }} +{{- end }} + +{{/* + Basic spec. Combine the sections into spec. +*/}} +{{- define "appdynamics-otel-collector.spec" -}} +{{- $spec := .Values.spec | deepCopy }} +{{- $spec := include "appdynamics-otel-collector.appendEnv" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.valuesVolume" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.valueServiceAccount" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $spec := include "appdynamics-otel-collector.selfTelemetry.spec" . | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- $config := include "appdynamics-otel-collector.autoValueConfig" (dict "var1" . "var2" $spec) | deepCopy | fromYaml }} +{{- $spec := include "appdynamics-otel-collector.configToYamlString" $config | fromYaml | deepCopy | mustMergeOverwrite $spec }} +{{- toYaml $spec }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-daemonset.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-daemonset.yaml new file mode 100644 index 00000000..516c56b8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-daemonset.yaml @@ -0,0 +1,43 @@ +{{- if .Values.install -}} +{{- if or .Values.enableFileLog (not .Values.enablePrometheus) }} + +{{- if (has "linux" .Values.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + annotations: +{{- 
.Values.annotations | toYaml | nindent 4 }} + labels: +{{- include "appdynamics-otel-collector.finalLabelsDaemonset" . | nindent 4 }} + name: {{ include "appdynamics-otel-collector.daemonset.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector-daemonset-linux.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +--- + +{{- if (has "windows" .Values.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + annotations: +{{- .Values.annotations | toYaml | nindent 4 }} + labels: +{{- include "appdynamics-otel-collector.finalLabelsDaemonset" . | nindent 4 }} + name: {{ include "appdynamics-otel-collector.daemonset.windows.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector-daemonset-windows.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-sidecar.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-sidecar.yaml new file mode 100644 index 00000000..0e0155c2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-sidecar.yaml @@ -0,0 +1,35 @@ +{{- if .Values.install -}} +{{- if or (and .Values.presets.tailsampler.enable (contains "sidecar" .Values.presets.tailsampler.deploy_mode)) (or .Values.presets.multi_tier.sidecar.enable .Values.presets.multi_tier.sidecar.client_side_loadbalancing) }} + +{{- if (has "linux" .Values.os) }} +apiVersion: 
opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: {{ include "appdynamics-otel-collector.sidecar.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector.sidecar.linux.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +--- + +{{- if (has "windows" .Values.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + name: {{ include "appdynamics-otel-collector.sidecar.windows.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector.sidecar.windows.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-statefulset.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-statefulset.yaml new file mode 100644 index 00000000..ece492db --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-statefulset.yaml @@ -0,0 +1,43 @@ +{{- if .Values.install -}} +{{- if .Values.enablePrometheus }} + +{{- if (has "linux" .Values.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + annotations: +{{- .Values.annotations | toYaml | nindent 4 }} + labels: +{{- include "appdynamics-otel-collector.finalLabelsStatefulset" . 
| nindent 4 }} + name: {{ include "appdynamics-otel-collector.statefulset.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector-statefulset-linux.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +--- + +{{- if (has "windows" .Values.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + annotations: +{{- .Values.annotations | toYaml | nindent 4 }} + labels: +{{- include "appdynamics-otel-collector.finalLabelsStatefulset" . | nindent 4 }} + name: {{ include "appdynamics-otel-collector.statefulset.windows.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector-statefulset-windows.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-tailsampler.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-tailsampler.yaml new file mode 100644 index 00000000..1ccad6db --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/opentelemetry-collector-tailsampler.yaml @@ -0,0 +1,43 @@ +{{- if .Values.install -}} +{{- if and .Values.presets.tailsampler.enable (contains "sampler" .Values.presets.tailsampler.deploy_mode) }} + +{{- if (has "linux" .Values.presets.multi_tier.tailsampler.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + annotations: +{{- .Values.annotations | toYaml | nindent 4 }} + labels: +{{- include 
"appdynamics-otel-collector.tailsampler.finalLabels" . | nindent 4 }} + name: {{ include "appdynamics-otel-collector.tailsampler.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector.tailsampler.linux.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +--- + +{{- if (has "windows" .Values.presets.multi_tier.tailsampler.os) }} +apiVersion: opentelemetry.io/v1alpha1 +kind: OpenTelemetryCollector +metadata: + annotations: +{{- .Values.annotations | toYaml | nindent 4 }} + labels: +{{- include "appdynamics-otel-collector.tailsampler.finalLabels" . | nindent 4 }} + name: {{ include "appdynamics-otel-collector.tailsampler.windows.fullname" .}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: +{{- include "appdynamics-otel-collector.tailsampler.windows.spec" . | nindent 2 }} +{{- if .Values.status }} +status: +{{- .Values.status | toYaml | nindent 2 }} +{{- end }} +{{- end }} + +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/rbac.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/rbac.yaml new file mode 100644 index 00000000..1c63e347 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/rbac.yaml @@ -0,0 +1,33 @@ +{{ if and .Values.rbac.create .Values.install }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "appdynamics-otel-collector.fullname" . }}-clusterrole + labels: + {{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +{{- with .Values.rbac.rules }} +rules: + {{- toYaml . 
| nindent 2 }} +{{- end }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "appdynamics-otel-collector.fullname" . }}-clusterrolebinding + namespace: {{ include "appdynamics-otel-collector.namespace" .}} + labels: + {{- include "appdynamics-otel-collector.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "appdynamics-otel-collector.fullname" . }}-clusterrole +subjects: + - kind: ServiceAccount + {{- with $spec := include "appdynamics-otel-collector.valueServiceAccount" . | fromYaml }} + name: {{ index $spec "serviceAccount" }} + {{- end}} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} + +{{ end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/service-account.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/service-account.yaml new file mode 100644 index 00000000..561610d7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/service-account.yaml @@ -0,0 +1,19 @@ +{{ if .Values.install -}} +{{- if and .Values.serviceAccount .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "appdynamics-otel-collector.serviceAccountName" . }} + labels: + {{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +{{- with .Values.serviceAccount.imagePullSecrets }} +imagePullSecrets: + {{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/service.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/service.yaml new file mode 100644 index 00000000..035bc579 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/service.yaml @@ -0,0 +1,132 @@ +{{- if .Values.install -}} +{{/*Global Servce*/}} +apiVersion: v1 +kind: Service +metadata: + labels: +{{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + name: {{ .Values.service.name }} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: + selector: + app.kubernetes.io/component: opentelemetry-collector +{{- include "appdynamics-otel-collector.gateway.selectorLabels" . | nindent 4 }} + type: {{ .Values.service.type }} + ports: +{{- .Values.service.ports | toYaml | nindent 2}} +{{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} +{{- end }} +{{- if .Values.nodeLocalTrafficMode }} + internalTrafficPolicy: {{ .Values.service.internalTrafficPolicy }} +{{- end }} + +--- + +{{/*Global Servce - headless */}} +{{- if .Values.presets.multi_tier.sidecar.client_side_loadbalancing }} +apiVersion: v1 +kind: Service +metadata: + labels: +{{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + name: {{ .Values.service.name -}}-headless + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: + selector: + app.kubernetes.io/component: opentelemetry-collector +{{- include "appdynamics-otel-collector.gateway.selectorLabels" . | nindent 4 }} + type: {{ .Values.service.type }} +{{- include "appdynamics-otel-collector.valueServicePorts" . 
| indent 2 }} + clusterIP: None +{{- end }} + +--- + +{{/*Sampler Service*/}} +{{- if .Values.presets.tailsampler.enable }} +apiVersion: v1 +kind: Service +metadata: + labels: +{{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + name: {{ .Values.presets.tailsampler.service.name }} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: + selector: + app.kubernetes.io/component: opentelemetry-collector +{{- include "appdynamics-otel-collector.tailsampler.selectorLabels" . | nindent 4 }} + type: {{ .Values.presets.tailsampler.service.type }} + ports: +{{- .Values.presets.tailsampler.service.ports | toYaml | nindent 2}} +{{- if .Values.presets.tailsampler.service.clusterIP }} + clusterIP: {{ .Values.presets.tailsampler.service.clusterIP }} +{{- end }} +{{- end }} + +--- + +{{/*Daemonset Servce*/}} +{{/*Only enabled when both deployments are required*/}} +{{- if and .Values.enablePrometheus .Values.enableFileLog -}} +apiVersion: v1 +kind: Service +metadata: + labels: +{{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + name: {{ .Values.service.name }}-ds + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: + selector: + app.kubernetes.io/component: opentelemetry-collector +{{- include "appdynamics-otel-collector.selectorLabelsDaemonset" . | nindent 4 }} + type: {{ .Values.service.type }} + ports: +{{- .Values.service.ports | toYaml | nindent 2}} +{{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} +{{- end }} +{{- end }} + +--- + +{{/*Statefulset Servce*/}} +{{/*Only enabled when both deployments are required*/}} +{{- if and .Values.enablePrometheus .Values.enableFileLog -}} +apiVersion: v1 +kind: Service +metadata: + labels: +{{- include "appdynamics-otel-collector.labels" . 
| nindent 4 }} + name: {{ .Values.service.name }}-ss + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +spec: + selector: + app.kubernetes.io/component: opentelemetry-collector +{{- include "appdynamics-otel-collector.selectorLabelsStatefulset" . | nindent 4 }} + type: {{ .Values.service.type }} + ports: +{{- .Values.service.ports | toYaml | nindent 2}} +{{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} +{{- end }} +{{- end }} + +--- + +{{/*Custom Servce*/}} +{{/*Used for create services that the name is configurable*/}} +{{- if ((.Values.global.customService).enable | default .Values.customService.enable) -}} +apiVersion: v1 +kind: Service +metadata: + labels: +{{- include "appdynamics-otel-collector.labels" . | nindent 4 }} + name: {{ (.Values.global.customService).name | default .Values.customService.name | required "customService.name must be non empty when enabled" }} + namespace: {{ (.Values.global.customService).namespace | default .Values.customService.namespace }} +spec: + type: ExternalName + externalName: {{.Values.service.name}}.{{ include "appdynamics-otel-collector.namespace" .}}.svc.cluster.local +{{- end }} + +{{- end}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/target-allocator-rbac.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/target-allocator-rbac.yaml new file mode 100644 index 00000000..badddc60 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/target-allocator-rbac.yaml @@ -0,0 +1,39 @@ +{{ if .Values.install -}} +{{- if and .Values.enablePrometheus (and .Values.targetAllocatorServiceAccount .Values.targetAllocatorServiceAccount.create) }} + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: 
appd-otel-collector-target-allocator-role + namespace: {{ include "appdynamics-otel-collector.namespace" . }} +rules: +- apiGroups: + - '' + resources: + - 'pods' + - 'nodes' + - 'endpoints' + - 'services' + verbs: + - 'get' + - 'list' + - 'watch' + + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: appd-otel-collector-target-allocator-rolebinding + namespace: {{ include "appdynamics-otel-collector.namespace" .}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: appd-otel-collector-target-allocator-role +subjects: + - kind: ServiceAccount + name: {{ include "appdynamics-otel-collector.valueTargetAllocatorServiceAccount" . }} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} + +{{ end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/target-allocator-service-account.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/target-allocator-service-account.yaml new file mode 100644 index 00000000..14b6fde6 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/target-allocator-service-account.yaml @@ -0,0 +1,17 @@ +{{ if .Values.install -}} +{{- if and .Values.enablePrometheus (and .Values.targetAllocatorServiceAccount .Values.targetAllocatorServiceAccount.create) -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "appdynamics-otel-collector.valueTargetAllocatorServiceAccount" . }} + namespace: {{ include "appdynamics-otel-collector.namespace" .}} + {{- with .Values.targetAllocatorServiceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- with .Values.targetAllocatorServiceAccount.imagePullSecrets }} +imagePullSecrets: + {{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/tests/simple-test-loadgen.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/tests/simple-test-loadgen.yaml new file mode 100644 index 00000000..055e9d15 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/templates/tests/simple-test-loadgen.yaml @@ -0,0 +1,20 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: loadgen + annotations: + "helm.sh/hook": test +spec: + template: + spec: + nodeSelector: + kubernetes.io/os: "linux" + containers: + - name: loadgen-grpc + image: ghcr.io/open-telemetry/opentelemetry-collector-contrib/tracegen:latest + command: ["/tracegen", "-otlp-endpoint=appdynamics-otel-collector-service:4317", "-otlp-insecure=true"] + - name: loadgen-http + image: ghcr.io/open-telemetry/opentelemetry-collector-contrib/tracegen:latest + command: ["/tracegen", "-otlp-endpoint=appdynamics-otel-collector-service:4318", "-otlp-insecure=true", "-otlp-http=true"] + restartPolicy: Never + backoffLimit: 4 \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/values.schema.json b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/values.schema.json new file mode 100644 index 00000000..7c3cc35f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/values.schema.json @@ -0,0 +1,707 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "title": "Values", + "additionalProperties": false, + "properties": { + "global": { + "type": "object", + "properties": { + "clusterName": { + "description": "The 
name for cluster where the collectors and target allocator are deployed, required when enablePrometheus set to true", + "type": "string" + }, + "clusterId": { + "description": "The uid of kube-system namespace, required when helm lookup is not supported and enablePrometheus/selfTelemetry is set to true", + "type": "string" + }, + "customService": { + "description": "A custom service which its name and namespace can be configured for user application. Shared with instrumentation chart, override .customerService.", + "type": "object", + "additionalProperties": false, + "properties": { + "enable" : { + "description": "Whether the custom service will be created, false by default", + "type": "boolean" + }, + "name": { + "description": "The name for the custom service", + "type": "string" + }, + "namespace": { + "description": "The namespace for the custom service", + "type": "string" + } + } + } + } + }, + "install": { + "description": "Install flag for Otel, 'true' will install/upgrade it, 'false' will not install/uninstall it", + "type": "boolean" + }, + "nameOverride": { + "description": "Override name of the chart used in Kubernetes object names.", + "type": "string" + }, + "fullnameOverride": { + "description": "Override fully qualified app name.", + "type": "string" + }, + "clientId": { + "description": "AppDynamics oauth2 client id", + "type": "string" + }, + "clientSecret": { + "description": "AppDynamics oauth2 client secret plain text.", + "type": ["string"] + }, + "clientSecretEnvVar": { + "description": "AppDynamics oauth2 client secret environment variable.", + "type": ["object"] + }, + "clientSecretVolume": { + "description": "AppDynamics oauth2 client secret volume.", + "type": ["object"] + }, + "tenantId": { + "description": "Tenant id, if not provided, the chart will try to extract the tenantId from tokenUrl.", + "type": ["string"] + }, + "tokenUrl": { + "description": "AppDynamics oauth2 token refreshurl", + "type": "string" + }, + "endpoint": { + 
"description": "AppDynamics otlp endpoint url", + "type": "string" + }, + "spec": { + "type": "object", + "description": "The spec section of OpenTelemetry Operator. Refer to https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec for possible properties" + }, + "traceContextPropagation": { + "type": "boolean", + "description": "to enable/disbale traceparent header propagation for export requests made by collector." + }, + "status": { + "type": "object", + "description": "The status section of OpenTelemetry Operator. Refer to https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorstatus for possible properties" + }, + "config": { + "description": "OpenTelemetry Collector Configuration. It is the recommended way to set the collector config. If spec.config is set, this property won't take effect. Refer to the OpenTelemetry Collector documentation for details.", + "type": "object" + }, + "configOverride": { + "description": "Any additional OpenTelemetry Collector Configuration for the enabled configuration. We can also use this field to remove/add new components to the pipelines", + "type": "object" + }, + "sendChartInfo": { + "description": "sendChartInfo when set to true, will add the chart name and version to the http headers for sending to AppDynamics Cloud for debugging purpose. 
It is false by default", + "type": "boolean" + }, + "serviceAccount": { + "description": "ServiceAccount applied by this chart, disable it by setting create to false.", + "type": "object", + "additionalProperties": false, + "properties": { + "create": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "name": { + "type": "string" + }, + "imagePullSecrets": { + "type": "array" + } + }, + "required": [ + "create" + ] + }, + "service": { + "description": "service expose collector for external traffics.", + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "ClusterIP", + "NodePort", + "LoadBalancer", + "ExternalName" + ] + }, + "ports": { + "type": "array" + }, + "clusterIP": { + "type": "string" + }, + "internalTrafficPolicy": { + "type": "string" + } + } + }, + "rbac": { + "description": "RBAC rules associated with the service account. If created, a Role will be created and bind with the collector service account", + "type": "object", + "additionalProperties": false, + "properties": { + "rules": { + "type": "array" + }, + "create": { + "type": "boolean" + } + }, + "required": [ + "create" + ] + }, + "selfTelemetry": { + "description": "Open telemetry collector metrics", + "type": "boolean" + }, + "selfTelemetryServiceName": { + "description": "Open telemetry collector service name", + "type": "string" + }, + "selfTelemetryServiceNamespace": { + "description": "Open telemetry collector service namespace", + "type": "string" + }, + "enablePrometheus": { + "description": "Enable the prometheus related deployment, it will deploy a target allocator and change collector replica mode to be statfulset.", + "type": "boolean" + }, + "setPodUID":{ + "description": "setPodUID when set to true will set pod uid to the collector self-telemetry resrouce.", + "type": "boolean" + }, + "targetAllocatorServiceAccount": { + "description": "ServiceAccount for target 
allocator, only enable when enablePrometheus equals true, disable it by setting create to false.", + "type": "object", + "additionalProperties": false, + "properties": { + "create": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "name": { + "type": "string" + }, + "imagePullSecrets": { + "type": "array" + } + }, + "required": [ + "create" + ] + }, + "os": { + "description": "Choose the os type for otel collector to run, useful when the cluster has mixed os types.", + "type": "array" + }, + "env":{ + "description": "os specific otel collector spec.", + "type": "object", + "additionalProperties": false, + "properties": { + "linux": { + "type": "object", + "description": "The spec and configuration override objects for Linux deployments of the Cisco AppDynamics Distribution of OpenTelemetry Collector. It will overrides the configs from mode.statefulset and mode.daemonset", + "additionalProperties": false, + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." 
+ }, + "mode": { + "type": "object", + "description": "The spec and configuration override objects for the Linux deployment modes of the Cisco AppDynamics Distribution of OpenTelemetry Collector.", + "properties": { + "daemonset": { + "type": "object", + "additionalProperties": false, + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." + } + } + }, + "statefulset": { + "type": "object", + "additionalProperties": false, + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." + } + } + } + } + } + } + }, + "windows": { + "type": "object", + "description": "The spec and configuration override objects for Windows deployments of the Cisco AppDynamics Distribution of OpenTelemetry Collector. 
It will overrides the configs from mode.statefulset and mode.daemonset", + "additionalProperties": false, + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." + }, + "mode": { + "type": "object", + "description": "The spec and configuration override objects for the Windows deployment modes of the Cisco AppDynamics Distribution of OpenTelemetry Collector.", + "properties": { + "daemonset": { + "type": "object", + "additionalProperties": false, + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." + } + } + }, + "statefulset": { + "type": "object", + "additionalProperties": false, + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. 
For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." + } + } + } + } + } + } + } + } + }, + "annotations":{ + "description": "user provided annotations.", + "type": "object" + }, + "labels":{ + "description": "user provided labels.", + "type": "object" + }, + "mode":{ + "description": "spec and config override for different deployment modes", + "type": "object", + "additionalProperties": false, + "properties": { + "daemonset": { + "type": "object", + "description": "The spec and configuration override objects for DaemonSet deployments of the Cisco AppDynamics Distribution of OpenTelemetry Collector.", + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." 
+ } + } + }, + "statefulset": { + "type": "object", + "description": "the spec and configuration override objects for StatefulSet deployments of the Cisco AppDynamics Distribution of OpenTelemetry Collector.", + "properties": { + "spec": { + "type": "object", + "description": "The desired state of the Cisco AppDynamics Distribution of OpenTelemetry Collector. For the full list of settings, see https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec" + }, + "configOverride": { + "type": "object", + "description": "The configurations for the Cisco AppDynamics Distribution of OpenTelemetry Collector. This environment variable will override the default configurations provided by the Helm chart values. For detailed configurations, see https://opentelemetry.io/docs/collector/configuration/." + } + } + } + } + }, + "enableFileLog":{ + "description": "enable filelog receiver deployment.", + "type": "boolean" + }, + "filelogReceiverConfig":{ + "description": "filelogReceiverConfig specify configs for filelog receiver.", + "type": "object", + "additionalProperties": false, + "properties": { + "includeLogsPath": { + "type": "array" + }, + "excludeLogsPath": { + "type": "array" + }, + "messageParserPattern": { + "type": "string" + }, + "messageParserType": { + "type": "string" + } + } + }, + "presets":{ + "description": "presets specify a list of pre-configured fuctions.", + "type": "object", + "additionalProperties": false, + "properties": { + "truncated": { + "description": "truncated specify max attributes value length.", + "type": "object", + "truncated": { + "description": "configurations to truncate attributes for OpenTelemetry signals.", + "type": "object", + "additionalProperties": false, + "properties": { + "trace": { + "description": "configurations to truncate attributes for trace signal.", + "type": "object", + "additionalProperties": false, + "properties": { + "resource": { + "description": "max length for trace 
resouce attribute", + "type": "integer" + }, + "scope": { + "description": "max length for trace scope attribute", + "type": "integer" + }, + "span": { + "description": "max length for span attribute", + "type": "integer" + }, + "spanevent": { + "type": "integer" + } + } + }, + "metric": { + "type": "object", + "additionalProperties": false, + "properties": { + "resource": { + "description": "max length for metric resouce attribute", + "type": "integer" + }, + "scope": { + "description": "max length for trace scope attribute", + "type": "integer" + }, + "datapoint": { + "description": "max length for metric datapoint attribute", + "type": "integer" + } + } + }, + "log": { + "type": "object", + "additionalProperties": false, + "properties": { + "resource": { + "description": "max length for log resouce attribute", + "type": "integer" + }, + "scope": { + "description": "max length for log scope attribute", + "type": "integer" + }, + "log": { + "description": "max length for log attribute", + "type": "integer" + } + } + } + } + } + }, + "multi_tier": { + "description": "multi-tier configurations for different deployments of AppDynamics Distribution of OpenTelemetry Collector", + "type": "object", + "tailsampler": { + "description": "configurations for tail sampling collector", + "type": "object", + "properties": { + "enable": { + "description": "whether the tail sampling collector should be deployed, default false. Tail sampling collector will be deployed automatically when presets.tailsampler.enable set to true and this config will be ignored.", + "type": "boolean" + }, + "os": { + "description": "The operating system that tail sampling collector should be deployed, [linux] by default. 
If you also want to deploy tail sampling collector to windows, you could change it to [linux, windows]", + "type": "array" + }, + "env": { + "type": "object", + "additionalProperties": false, + "properties": { + "linux": { + "type": "object", + "additionalProperties": false, + "properties": { + "spec": { + "description": "The spec section of OpenTelemetry Operator. Refer to https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec for possible properties", + "type": "object" + }, + "configOverride": { + "description": "Any additional OpenTelemetry Collector Configuration for the enabled configuration. We can also use this field to remove/add new components to the pipelines", + "type": "object" + } + } + }, + "windows": { + "type": "object", + "additionalProperties": false, + "properties": { + "spec": { + "description": "The spec section of OpenTelemetry Operator. Refer to https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec for possible properties", + "type": "object" + }, + "configOverride": { + "description": "Any additional OpenTelemetry Collector Configuration for the enabled configuration. We can also use this field to remove/add new components to the pipelines", + "type": "object" + } + } + } + } + }, + "spec": { + "description": "The spec section of OpenTelemetry Operator. Refer to https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#opentelemetrycollectorspec for possible properties", + "type": "object" + }, + "config": { + "description": "OpenTelemetry Collector Configuration. It is the recommended way to set the collector config. If spec.config is set, this property won't take effect. Refer to the OpenTelemetry Collector documentation for details.", + "type": "object" + }, + "configOverride": { + "description": "Any additional OpenTelemetry Collector Configuration for the enabled configuration. 
We can also use this field to remove/add new components to the pipelines", + "type": "object" + } + } + } + }, + "presampler": { + "description": "presampler config", + "type": "object" + }, + "samplerDebug": { + "description": "samplerDebug config", + "type": "object" + }, + "tailsampler": { + "description": "Configs for tail sampling", + "type": "object", + "properties": { + "enable": { + "description": "Whether tail sampling should be used, default to false", + "type": "boolean" + }, + "deploy_mode": { + "description": "How tail sampling should be deployed, for now only gateway_sampler is available, which means the default gateway_sampler will receive the trace and forward to tail sampling collector for sampling", + "type": "string" + }, + "replicas": { + "description": "The number of tail sampling collector should be deployed.", + "type": "integer" + }, + "service": { + "description": "The tail sampling collector service configs", + "type": "object", + "properties": { + "name": { + "description": "The name of the tail sampling collector, default to be appdynamics-otel-collector-sampler-service", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "ClusterIP", + "NodePort", + "LoadBalancer", + "ExternalName" + ] + }, + "ports": { + "type": "array" + } + } + }, + "pipeline": { + "description": "The trace processor pipeline for the tail sampling collector, this is where the user can drop or add a certain sampler", + "type": "array" + }, + "trace_classification_and_sampling": { + "description": "Configs for trace_classification_and_sampling processor.", + "type": "object", + "properties": { + "decision_wait": { + "type": "string" + }, + "policies": { + "type": "array" + }, + "samplers": { + "type": "object", + "properties": { + "export_period": { + "type": "string" + }, + "consistent_reservoir_sampler": { + "type": "object" + } + } + } + } + }, + "consistent_proportional_sampler": { + "description": "Configs for consistent_proportional_sampler 
processor", + "type": "object", + "properties": { + "export_period": { + "type": "string" + }, + "spans_per_period": { + "type": "integer" + }, + "exponential_smooth": { + "type": "number" + }, + "initial_estimate_rate": { + "type": "integer" + } + } + }, + "intermediate_sampler": { + "description": "Configs for intermediate_sampler processor", + "type": "object", + "properties": { + "export_period": { + "type": "string" + }, + "size_limit": { + "type": "integer" + }, + "size_limit_type": { + "type": "string" + }, + "initial_estimate_rate": { + "type": "integer" + } + } + } + } + }, + "selfTelemetry": { + "description": "self-telemetry config", + "type": "object", + "additionalProperties": false, + "properties": { + "exporters": { + "description": "exporters for self telemetry, default otlphttp: {}. By default self telemetry is sent with other metrics together to AppDynamics, the user may add or replace the exporters here to let the self telemetry be sent to other places. Note when a exporter with empty config need to be added, please use {} as value, for example logging: {} instead of logging: null or just logging: ", + "type": "object" + } + } + } + } + }, + "disableOpamp": { + "description": "disable agent management extension communication. 
By setting this to true, the user can enable agent management extension without connecting it with the platform, the collector can still report self telemetry by setting agentManagementSelfTelemetry to true", + "type": "boolean" + }, + "agentManagement": { + "description": "Enable agent management extension", + "type": "boolean" + }, + "agentManagementSelfTelemetry": { + "description": "Enable agent management extension based self telemetry, it will report the same set of metrics by enabling selfTelemetry, but it will have different resource attribute and use different solution", + "type": "boolean" + }, + "enableNetworkMonitoring": { + "description": "enable the Network Monitoring related receiver & processors", + "type": "boolean" + }, + "enabled": { + "description": "", + "type": "boolean" + }, + "customService": { + "description": "A custom service which its name and namespace can be configured for user application. Note that the default service name, i.e. appdynamics-otel-collector-service, is used by other AppDynamics collectors and shall not be changed", + "type": "object", + "additionalProperties": false, + "properties": { + "enable" : { + "description": "Whether the custom service will be created, false by default", + "type": "boolean" + }, + "name": { + "description": "The name for the custom service", + "type": "string" + }, + "namespace": { + "description": "The namespace for the custom service", + "type": "string" + } + } + }, + "nodeLocalTrafficMode": { + "description": "when node local traffic node is enabled, the otel collector will only handle traffic from local node and k8sattributes processor will only watch the local node resource, it will save the resources cost by k8sattributes processor, the mode shall not be turned on when enablePrometheus set to true and enableFileLog set to false", + "type": "boolean" + }, + "useGOMEMLIMIT": { + "description": "when useGOMEMLIMIT turned on, GOMEMLIMIT environment variable will be added, with value of 80% 
of the memory limit, GOMEMLIMIT help protect otel collector from OOM, but may cause more GC and cpu consumption.", + "type": "boolean" + } + }, + "required": [ + "clientId", + "tokenUrl", + "endpoint", + "install" + ], + "anyOf": [ + {"required" : ["clientSecret"]}, + {"required" : ["clientSecretEnvVar"]}, + {"required" : ["clientSecretVolume"]} + ] +} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/values.yaml new file mode 100644 index 00000000..f6430c59 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-collector/values.yaml @@ -0,0 +1,526 @@ +nameOverride: "" +fullnameOverride: "" + +install: true +enabled: true + +# data endpoint +endpoint: "" + +#oauth2client required settings +clientId: "" +clientSecretEnvVar: {} +tokenUrl: "" + +labels: {} + +annotations: + "prometheus.io/scrape": "false" + +global: + clusterName: "" + tls: + otelReceiver: + secret: {} + settings: {} + otelExporter: + secret: {} + settings: {} + +# spec is the spec section for opentelemetry collector CRD defined by opentelemetry operator. +spec: + image: appdynamics/appdynamics-cloud-otel-collector:24.7.0-1639 + # based on the perf testing + resources: + limits: + cpu: 200m + memory: 1Gi + requests: + cpu: 10m + memory: 256Mi + + +# status is the status section for opentelemtry operator +status: {} + +# config is the Collector configs. Other resources configs are in seperated sections below. 
+config: + extensions: + health_check: + endpoint: 0.0.0.0:13133 + zpages: + endpoint: 0.0.0.0:55679 + + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + otlp/lca: + protocols: + grpc: + endpoint: 0.0.0.0:14317 + http: + endpoint: 0.0.0.0:14318 + + processors: + # https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md + batch: + send_batch_size: 1000 + timeout: 10s + send_batch_max_size: 1000 + batch/traces: + send_batch_size: 1000 + timeout: 10s + send_batch_max_size: 1000 + batch/metrics: + send_batch_size: 1000 + timeout: 10s + send_batch_max_size: 1000 + batch/logs: + send_batch_size: 1000 + timeout: 10s + send_batch_max_size: 1000 + filter/appd: + logs: + include: + match_type: strict + resource_attributes: + - key: telemetry.sdk.name + value: infra-agent + filter/non_appd: + logs: + exclude: + match_type: strict + resource_attributes: + - key: telemetry.sdk.name + value: infra-agent + filter/jvm: + metrics: + metric: + - 'name == "jvm.gc.duration"' + transform/truncate: + transform/jvmmetric: + metric_statements: + - context: metric + statements: + - extract_count_metric(true) where name == "jvm.gc.duration" + - extract_sum_metric(true) where name == "jvm.gc.duration" + - set(unit, "ms") where name == "jvm.gc.duration_sum" + - set(description, "Time spent in a given JVM garbage collector in milliseconds.") where name == "jvm.gc.duration_sum" + - set(unit, "{collections}") where name == "jvm.gc.duration_count" + - set(description, "The number of collections that have occurred for a given JVM garbage collector.") where name == "jvm.gc.duration_count" + metricstransform/jvmdatapoint: + transforms: + - include: ^jvm\.gc\.(duration_count|duration_sum) + match_type: regexp + action: update + operations: + - action: update_label + label: "jvm.gc.action" + new_label: "action" + - action: update_label + label: "jvm.gc.name" + new_label: "gc" + - include: 
"jvm.gc.duration_sum" + match_type: strict + action: update + new_name: "runtime.jvm.gc.time" + operations: + - action: experimental_scale_value + experimental_scale: 1000 + - action: toggle_scalar_data_type + - include: "jvm.gc.duration_count" + match_type: strict + action: update + new_name: "runtime.jvm.gc.count" + batchbybytesize: {} + + exporters: + otlphttp: + auth: + authenticator: oauth2client + + logging: + verbosity: detailed + + debug: + verbosity: detailed + + service: + extensions: [health_check, oauth2client] + pipelines: + metrics: + receivers: [otlp] + processors: [memory_limiter, transform/jvmmetric, filter/jvm, metricstransform/jvmdatapoint, transform/truncate, batch/metrics, batchbybytesize] # according to doc, "The memory_limiter processor should be the 1st processor configured in the pipeline (immediately after the receivers)." + exporters: [otlphttp] + traces: + receivers: [otlp] + processors: [memory_limiter, k8sattributes, transform/truncate, batch/traces, batchbybytesize] + exporters: [otlphttp] + logs: + receivers: [otlp] + processors: [memory_limiter, filter/non_appd, k8sattributes/logs, transform/logs, transform/truncate, batch/logs, batchbybytesize] + exporters: [otlphttp] + logs/appd: + receivers: [otlp] + processors: [memory_limiter, filter/appd, batch] + exporters: [otlphttp] + logs/lca: + receivers: [otlp/lca] + processors: [memory_limiter] + exporters: [otlphttp] + +# extra otel collector configuration +configOverride: {} + +# service expose collector for external traffics. 
+service: + name: "appdynamics-otel-collector-service" + ports: + - name: http + port: 4318 + protocol: TCP + targetPort: 4318 + - name: grpc + port: 4317 + protocol: TCP + targetPort: 4317 + - name: grpc-lca + port: 14317 + protocol: TCP + targetPort: 14317 + - name: grpc-ebpf + port: 24317 + protocol: TCP + targetPort: 24317 + type: ClusterIP + internalTrafficPolicy: Local + +customService: + enable: false + name: "" + namespace: "" + +# serviceAccount is the serviceAccount associated with the collector, set serviceAccount.create to false if you don't need it +serviceAccount: + annotations: {} + create: true + imagePullSecrets: [] + #name: default to be the full name + +rbac: + create: true + rules: + # k8sattributes processor needed rules. + - apiGroups: [""] + resources: ["pods", "namespaces", "endpoints"] + verbs: ["get", "watch", "list"] + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "watch", "list"] + +# tracecontext propagation +traceContextPropagation: true + +# enablePrometheus enable the prometheus related deployment, it will deploy a target allocator and a statefulset. +enablePrometheus: false +# enableFileLog enable the filelog, it will deploy a daemonset to collect logs on each host. 
+enableFileLog: false +# disable agent management Opamp communication +disableOpamp: false +# Enable agent management extension +agentManagement: true +# Collector self telemetry +agentManagementSelfTelemetry: false +# Collector self telemetry, will be deprecated in Jan 2024 +selfTelemetry: false +# enableNetworkMonitoring enables the Network Monitoring related receiver & processors +enableNetworkMonitoring: false +# when node local traffic node is enabled, the otel collector will only handle traffic from local node and k8sattributes processor will only watch local resource +# it will save the resources cost by k8sattributes processor +nodeLocalTrafficMode: false +# when useGOMEMLIMIT turned on, GOMEMLIMIT environment variable will be added, with value of 80% of the memory limit, GOMEMLIMIT help protect otel collector from OOM, but may cause more GC and cpu consumption. +useGOMEMLIMIT: true + +# targetAllocatorServiceAccount only enabled when enablePrometheus=true, +# It will create a service account with a cluster role that have necessary permissions for the allocator to run. +targetAllocatorServiceAccount: + annotations: {} + create: true + imagePullSecrets: [] + #name: default to be the collector name with "-target-allocator" suffix, e.g. "my-collector-target-allocator" + + +# deployment mode specific spec and config overrides. +mode: + statefulset: + spec: + mode: statefulset + daemonset: + spec: + mode: daemonset + +# OS specific spec and config overrides. 
+os: [linux] +env: + linux: + spec: + nodeSelector: + kubernetes.io/os: "linux" + # mode: + # statefulset: deployment and OS specific spec/config overrides + windows: + spec: + nodeSelector: + kubernetes.io/os: "windows" + livenessProbe: + initialDelaySeconds: 5 + +presets: + samplerDebug: + enable: false + config: + extensions: + appd_data_router: {} + processors: + tracerecord/received: + appd_router_ext: appd_data_router + tracerecord/sampled: + appd_router_ext: appd_data_router + receivers: + appdeventsreceiver: + appd_router_ext: appd_data_router + service: + extensions: [health_check, oauth2client, appd_data_router] + pipelines: + logs/sampler_debug: + receivers: [appdeventsreceiver] + exporters: [otlphttp] + presampler: + enable: false + deploy_mode: gateway # sidecar + pipeline: [memory_limiter, consistent_proportional_sampler/presampler, k8sattributes, batch/traces] + pipeline_sidecar: [memory_limiter, consistent_proportional_sampler/presampler, batch/traces] + #pipeline: [memory_limiter, k8sattributes, consistent_sampler/presampler, batch/traces] replace with this pipeline when testing adding configured p value directly. + consistent_proportional_sampler: + export_period: 1s # the export period for specifying the expected output rate, it is for rate calculation only, NOT for batch interval. The batch interval can be configured at trace_classification_and_sampling.samplers.export_period, or you can add a batch processor before this. + spans_per_period: 1000 # number of spans per request, the expected rate limit is calculated by dividing this number by export_period. The spans per packet is limited by the max packet size, assuming 1MB limit, and each span with size of 1KB + exponential_smooth: 0.1 # start with small number + initial_estimate_rate: 1000 # number of incomming span rate, just give a reasonable guess. 
+ rate_estimator: batch_rate_estimator + sample_mode: presampling + consistent_sampler: + p_value: 1 # user can configure a p value to add to the trace state directly, it is mainly for testing purpose + + # default configuration resulted in about 48% of tier 1 limit, which is 480 request/minute with 1000 spans/request + tailsampler: + enable: false + deploy_mode: gateway_sampler # gateway_sampler, sidecar_gateway, sidecar_sampler, specify the loadbalancer and tailsampling position with _ + replicas: 1 + service: + name: "appdynamics-otel-collector-sampler-service" + type: ClusterIP + clusterIP: None + ports: + - name: sampler + port: 24317 + protocol: TCP + targetPort: 24317 + # groupbyattrs/compact is for compressing the traces with the same resource or scope + pipeline: [memory_limiter, batch/input, intermediate_sampler, trace_classification_and_sampling, consistent_proportional_sampler, groupbyattrs/compact, batch/traces, batchbybytesize] # the sampler pipeline set up + # when deployed as sidecar_sampler, sidecar will export trace to tail sampler directly, thus we need k8sattributes + pipeline_sidecar_loadbalancer: [memory_limiter, k8sattributes, groupbytrace, trace_classification_and_sampling, consistent_proportional_sampler, groupbyattrs/compact, batchbybytesize] + # classification and balanced sampling + groupbytrace: + wait_duration: 30s + trace_classification_and_sampling: + decision_wait: 30s + num_traces: 1000000 # Limit number of traces to keep in memory waiting for decision. + # classification, example considers error, high latency and all other traces, each category will be rate limit separately. 
+ no_wait: true + independent_grouping: true + policies: + - name: errors-policy + type: status_code + sampler_name: "consistent_reservoir_sampler/error" + status_code: + status_codes: [ERROR] + - name: high-latency + type: latency + sampler_name: "consistent_reservoir_sampler/latency" + latency: + threshold_ms: 10000 + - name: always-on + type: always_sample + sampler_name: "consistent_reservoir_sampler/anyother" + # balanced sampler controls the max rate for a category, the proportion among categories is more important because the final export rate is controlled by the following proportional sampler. + samplers: + export_period: 1s + consistent_reservoir_sampler: + error: + reservoir_size: 1000 + latency: + reservoir_size: 1000 + anyother: + reservoir_size: 1000 + # consistent_proportional_sampler controls the final export rate. + consistent_proportional_sampler: + export_period: 1s # the export period for specifying the expected output rate, it is for rate calculation only, NOT for batch interval. The batch interval can be configured at trace_classification_and_sampling.samplers.export_period, or you can add a batch processor before this. + spans_per_period: 1000 # number of spans per request, the expected rate limit is calculated by dividing this number by export_period. The spans per packet is limited by the max packet size, assuming 1MB limit, and each span with size of 1KB + exponential_smooth: 0.1 # start with small number + initial_estimate_rate: 3000 # number of incomming span rate, just give a reasonable guess. + intermediate_sampler: + export_period: 1s + size_limit: 10000 # the output rate will be at [size_limit, 2*size_limit], i.e. 
10000~20000 spans per second + size_limit_type: SpanCount + estimated_sampling: true + exponential_smooth: 0.2 + + selfTelemetry: + exporters: + otlphttp: {} + + + multi_tier: + sidecar: + enable: false + client_side_loadbalancing: false + env: + linux: + spec: + nodeSelector: + kubernetes.io/os: "linux" + windows: + spec: + nodeSelector: + kubernetes.io/os: "windows" + livenessProbe: + initialDelaySeconds: 5 + spec: + image: appdynamics/appdynamics-cloud-apm-collector + mode: sidecar + # based on the perf testing + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + config: + extensions: + health_check: + endpoint: 0.0.0.0:13133 + zpages: + endpoint: 0.0.0.0:55679 + processors: + batch/traces: + send_batch_size: 100 + timeout: 1s + batch/metrics: + send_batch_size: 100 + timeout: 1s + batch/logs: + send_batch_size: 100 + timeout: 1s + exporters: + otlp: + endpoint: appdynamics-otel-collector-service.appdynamics.svc.cluster.local:4317 + tls: + insecure: true + logging: + verbosity: detailed + receivers: + otlp: + protocols: + grpc: + endpoint: localhost:4317 + http: + endpoint: localhost:4318 + service: + extensions: [health_check] + pipelines: + metrics: + receivers: [otlp] + processors: [memory_limiter, batch/metrics] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [memory_limiter, batch/traces] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [memory_limiter, batch/logs] + exporters: [otlp] + tailsampler: + enable: false + os: [linux] + env: + linux: + spec: + nodeSelector: + kubernetes.io/os: "linux" + windows: + nodeSelector: + kubernetes.io/os: "windows" + livenessProbe: + initialDelaySeconds: 5 + spec: + image: appdynamics/appdynamics-cloud-otel-collector:24.7.0-1639 + mode: deployment + # based on the perf testing + resources: + limits: + cpu: 1500m + memory: 1536Mi + requests: + cpu: '1' + memory: 1Gi + config: + extensions: + health_check: + endpoint: 0.0.0.0:13133 + zpages: + endpoint: 
0.0.0.0:55679 + processors: + batch/input: + send_batch_size: 500 + batch/traces: + send_batch_size: 1000 + send_batch_max_size: 1000 + batchbybytesize: {} + exporters: + otlphttp: + auth: + authenticator: oauth2client + logging: + verbosity: detailed + service: + extensions: [health_check, oauth2client] + truncated: + trace: + resource: 512 + scope: 512 + span: 512 + spanevent: 512 + metric: + resource: + scope: + datapoint: + log: + resource: + scope: + log: + + + + +filelogReceiverConfig: + includeLogsPath: ["/var/log/*/*/*/*log"] + excludeLogsPath: ["/var/log/pods/*otel-collector*/*/*.log"] + messageParserPattern: "timestamp" + messageParserType: "ABSOLUTE" + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/Chart.yaml new file mode 100644 index 00000000..48a5ddf1 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +appVersion: 24.4.0-1589 +description: A Helm chart for installing otel auto instrumentation CRDs +name: appdynamics-otel-instrumentation +type: application +version: 24.4.0-1589 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/examples/defaults.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/examples/defaults.yaml new file mode 100644 index 00000000..9f2d280e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/examples/defaults.yaml @@ -0,0 +1,2 @@ +spec: + java: \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/examples/oob.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/examples/oob.yaml new file mode 100644 index 00000000..253d802b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/examples/oob.yaml @@ -0,0 +1,9 @@ +spec: + java: + dotnet: + env: + # Required if endpoint is set to 4317. 
+ # Dotnet autoinstrumentation uses http/proto by default + # See https://github.com/open-telemetry/opentelemetry-dotnet-instrumentation/blob/888e2cd216c77d12e56b54ee91dafbc4e7452a52/docs/config.md#otlp + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://otel-collector:4318 \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/templates/Instrumentation.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/templates/Instrumentation.yaml new file mode 100644 index 00000000..3d508449 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/templates/Instrumentation.yaml @@ -0,0 +1,36 @@ +{{- define "appdynamics-otel-instrumentation.exporter" -}} +env: + - name: OTEL_EXPORTER_OTLP_PROTOCOL + value: {{ternary "http/protobuf" "grpc" (eq .Values.exporter.protocol "http")}} +exporter: + {{- $scheme := ternary "http://" "https://" .Values.exporter.insecure }} + endpoint: {{$scheme}}{{tpl (dig .Values.exporter.protocol "endpoint" "0.0.0.0:4317" .Values.exporter) .}} +{{- end -}} +{{- define "appdynamics-otel-instrumentation.language.specific.default" -}} +# tracer level configuration - uncomment to enable instrumentation for the language +#java: +{{- $scheme := ternary "http://" "https://" .Values.exporter.insecure }} +python: + env: + - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT + value: {{$scheme}}{{tpl (dig "http" "endpoint" "0.0.0.0:4317" .Values.exporter) .}}/v1/traces + - name: OTEL_TRACES_EXPORTER + value: otlp_proto_http +#nodejs: +dotnet: + env: + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: {{$scheme}}{{tpl (dig "http" "endpoint" "0.0.0.0:4317" .Values.exporter) .}} + - name: OTEL_EXPORTER_OTLP_PROTOCOL + value: http/protobuf +{{- end -}} +--- +apiVersion: opentelemetry.io/v1alpha1 +kind: Instrumentation +metadata: + name: {{ .Values.name 
| default .Release.Name }} + namespace: {{ .Values.namespace | default .Release.Namespace }} +spec: +{{ $spec := include "appdynamics-otel-instrumentation.exporter" . | fromYaml }} +{{- merge .Values.spec $spec (include "appdynamics-otel-instrumentation.language.specific.default" . | fromYaml) | toYaml | indent 2 }} + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/templates/_agent_management_and_global_input.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/templates/_agent_management_and_global_input.tpl new file mode 100644 index 00000000..d10761fd --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/templates/_agent_management_and_global_input.tpl @@ -0,0 +1,15 @@ +{{- define "appdynamics-otel-collector.namespace" -}} +{{- if .Values.global.smartAgentInstall -}} +{{- default .Release.Namespace .Values.global.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{- define "appdynamics-otel-collector.endpoint" -}} +{{- if (.Values.global.customService).enable -}} +{{.Values.global.customService.name}}.{{.Values.global.customService.namespace}}.svc.cluster.local +{{- else -}} +appdynamics-otel-collector-service.{{ include "appdynamics-otel-collector.namespace" .}}.svc.cluster.local +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/values.schema.json b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/values.schema.json new file mode 100644 index 00000000..7a72a8ec --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/values.schema.json @@ -0,0 +1,76 @@ +{ + "$schema": 
"http://json-schema.org/schema#", + "type": "object", + "title": "Values", + "additionalProperties": false, + "properties": { + "global": { + "type": "object", + "customService": { + "description": "A custom service which its name and namespace can be configured for user application, shared with collector helm chart.", + "type": "object", + "additionalProperties": false, + "properties": { + "enable" : { + "description": "Whether the custom service will be created, false by default", + "type": "boolean" + }, + "name": { + "description": "The name for the custom service", + "type": "string" + }, + "namespace": { + "description": "The namespace for the custom service", + "type": "string" + } + } + } + }, + "name": { + "description": "name for the opentelemetry instrumentation custom resource.", + "type": "string" + }, + "exporter": { + "description": "exporter config for the instrumentation agent.", + "type": "object", + "properties": { + "protocol": { + "description": "protocol used for communication, could be http or grpc, endpoint will be auto selected for the protocol.", + "type": "string", + "enum": [ + "http", + "grpc" + ] + }, + "insecure": { + "description": "whether use TLS for communication, disable TLS by set it to true.", + "type": "boolean" + }, + "grpc": { + "properties": { + "endpoint": { + "description": "grpc exporter endpoint.", + "type": "string" + } + } + }, + "http": { + "properties": { + "endpoint": { + "description": "http exporter endpoint.", + "type": "string" + } + } + } + } + }, + "spec": { + "description": "instrumentation spec.", + "type": "object" + }, + "enabled" : { + "description": " whether instrumentation enabled.", + "type": "boolean" + } + } +} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/values.yaml new 
file mode 100644 index 00000000..933f0522 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-otel-instrumentation/values.yaml @@ -0,0 +1,31 @@ +# Default values for appdynamics-otel-instrumentation. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +name: appd-instrumentation +#namespace: appdynamics + +global: + customService: + enable: false + +# exporter configuration - default grpc with insecure +exporter: + protocol: grpc + insecure: true + grpc: + endpoint: "{{ include \"appdynamics-otel-collector.endpoint\" .}}:4317" + http: + endpoint: "{{ include \"appdynamics-otel-collector.endpoint\" .}}:4318" + +spec: + # environment variables (common) + #env: [] + sampler: + type: parentbased_always_on + # common resource attributes, comment out default values which will cause argocd out of sync. + #resource: + # addK8sUIDAttributes: false + # resourceAttributes: {} + # trace propogators + propagators: + - tracecontext \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/Chart.yaml new file mode 100644 index 00000000..82313a08 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +appVersion: 1.0.26 +dependencies: +- name: panoptica + repository: oci://us-docker.pkg.dev/eticloud/panoptica-public-registry + version: 1.218.0 +description: Helm Chart to deploy security collector as a deployment and corresponding + dbconfigs. 
+home: https://appdynamics.com +icon: https://raw.githubusercontent.com/CiscoDevNet/appdynamics-charts/master/logo.png +maintainers: +- email: support@appdynamics.com + name: AppDynamics +name: appdynamics-security-collector +version: 1.0.26 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/.helmignore new file mode 100644 index 00000000..6c5b28ae --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/.helmignore @@ -0,0 +1,27 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +# Ignore helm lint testing ci values +ci/ + +*.mustache \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/Chart.yaml new file mode 100644 index 00000000..98716351 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +appVersion: 1.218.0 +dependencies: +- alias: apiclarity-postgresql + condition: global.isAPISecurityEnabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.12 +description: Charts for Panoptica deployments. 
+name: panoptica +type: application +version: 1.218.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/README.md b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/README.md new file mode 100644 index 00000000..6921c02e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/README.md @@ -0,0 +1,249 @@ +# panoptica + +![Version: 1.218.0](https://img.shields.io/badge/Version-1.218.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.218.0](https://img.shields.io/badge/AppVersion-1.218.0-informational?style=flat-square) + +Charts for Panoptica deployments. + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | apiclarity-postgresql(postgresql) | 11.6.12 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| apiclarity-postgresql.auth.database | string | `"apiclarity"` | | +| apiclarity-postgresql.auth.existingSecret | string | `"apiclarity-postgresql-secret"` | | +| apiclarity-postgresql.containerSecurityContext.enabled | bool | `true` | | +| apiclarity-postgresql.containerSecurityContext.runAsNonRoot | bool | `true` | | +| apiclarity-postgresql.containerSecurityContext.runAsUser | int | `1001` | | +| apiclarity-postgresql.fullnameOverride | string | `"apiclarity-postgresql"` | | +| apiclarity-postgresql.image.pullPolicy | string | `"IfNotPresent"` | | +| apiclarity-postgresql.image.registry | string | `"gcr.io/eticloud/k8sec"` | Image registry, must be set to override the dependency registry. 
| +| apiclarity-postgresql.image.repository | string | `"bitnami/postgresql"` | | +| apiclarity-postgresql.image.tag | string | `"14.4.0-debian-11-r4"` | | +| apiclarity.affinity | object | `{}` | | +| apiclarity.fuzzer.affinity | object | `{}` | | +| apiclarity.fuzzer.debug | bool | `false` | | +| apiclarity.fuzzer.enabled | bool | `false` | | +| apiclarity.fuzzer.image.pullPolicy | string | `"Always"` | | +| apiclarity.fuzzer.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| apiclarity.fuzzer.image.repository | string | `"scn-dast"` | | +| apiclarity.fuzzer.image.tag | string | `"b0e698ea50aa701d22a1f8fbe549d45c340e0b91"` | | +| apiclarity.fuzzer.labels | object | `{"app":"fuzzer"}` | Configure fuzzer labels | +| apiclarity.fuzzer.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment | +| apiclarity.fuzzer.resources.limits.cpu | string | `"200m"` | | +| apiclarity.fuzzer.resources.limits.memory | string | `"1000Mi"` | | +| apiclarity.fuzzer.resources.requests.cpu | string | `"100m"` | | +| apiclarity.fuzzer.resources.requests.memory | string | `"200Mi"` | | +| apiclarity.fuzzer.securityContext.allowPrivilegeEscalation | bool | `false` | | +| apiclarity.fuzzer.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| apiclarity.fuzzer.securityContext.privileged | bool | `false` | | +| apiclarity.fuzzer.securityContext.readOnlyRootFilesystem | bool | `true` | | +| apiclarity.fuzzer.securityContext.runAsGroup | int | `1001` | | +| apiclarity.fuzzer.securityContext.runAsNonRoot | bool | `true` | | +| apiclarity.fuzzer.securityContext.runAsUser | int | `1001` | | +| apiclarity.image.pullPolicy | string | `"IfNotPresent"` | | +| apiclarity.image.registry | string | `""` | Image registry, used to override global.registry if needed. 
| +| apiclarity.image.repository | string | `"apiclarity"` | | +| apiclarity.image.tag | string | `"9a09d167c27046e6d76a96e6e4f248f166b9fc8f"` | | +| apiclarity.imagePullSecrets | list | `[]` | | +| apiclarity.logLevel | string | `"warning"` | Logging level (debug, info, warning, error, fatal, panic). | +| apiclarity.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment | +| apiclarity.persistence.accessMode | string | `"ReadWriteOnce"` | | +| apiclarity.persistence.size | string | `"100Mi"` | The storage space that should be claimed from the persistent volume | +| apiclarity.persistence.storageClass | string | `nil` | If defined, storageClassName will be set to the defined storageClass. If set to "-", storageClassName will be set to an empty string (""), which disables dynamic provisioning. If undefined or set to null (the default), no storageClassName spec is set, choosing 'standard' storage class available with the default provisioner (gcd-pd on GKE, hostpath on minikube, etc). 
| +| apiclarity.podSecurityContext.fsGroup | int | `1000` | | +| apiclarity.resources.limits.cpu | string | `"1000m"` | | +| apiclarity.resources.limits.memory | string | `"1000Mi"` | | +| apiclarity.resources.requests.cpu | string | `"100m"` | | +| apiclarity.resources.requests.memory | string | `"200Mi"` | | +| apiclarity.securityContext.allowPrivilegeEscalation | bool | `false` | | +| apiclarity.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| apiclarity.securityContext.privileged | bool | `false` | | +| apiclarity.securityContext.readOnlyRootFilesystem | bool | `true` | | +| apiclarity.securityContext.runAsGroup | int | `1000` | | +| apiclarity.securityContext.runAsNonRoot | bool | `true` | | +| apiclarity.securityContext.runAsUser | int | `1000` | | +| apiclarity.serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| apiclarity.serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| apiclarity.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| apiclarity.tolerations | list | `[]` | | +| apiclarity.traceSource.external | bool | `false` | Indicates whether external GWs supply traces. | +| apiclarity.traceSource.istio | bool | `false` | Indicates whether istio supply traces. | +| apiclarity.traceWasmFilterSHA256 | string | `"5f48a298d47422f6fb8e03b5c856fae5c4aaab60b8b9e9f28a13ca34d22bf0b7"` | | +| busybox.image.pullPolicy | string | `"IfNotPresent"` | | +| busybox.image.registry | string | `""` | Image registry, used to override global.registry if needed. 
| +| busybox.image.repository | string | `"curlimages/curl"` | | +| busybox.image.tag | string | `"latest"` | | +| busybox.securityContext.allowPrivilegeEscalation | bool | `false` | | +| busybox.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| busybox.securityContext.privileged | bool | `false` | | +| busybox.securityContext.readOnlyRootFilesystem | bool | `true` | | +| busybox.securityContext.runAsGroup | int | `1001` | | +| busybox.securityContext.runAsNonRoot | bool | `true` | | +| busybox.securityContext.runAsUser | int | `1001` | | +| controller.affinity | object | `{}` | | +| controller.agentID | string | `""` | [Required] Controller identification, should be extracted from SaaS after cluster creation. | +| controller.autoscaling.enabled | bool | `true` | | +| controller.autoscaling.maxReplicas | int | `5` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| controller.fullnameOverride | string | `"portshift-agent"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| controller.image.repository | string | `"k8s_agent"` | | +| controller.image.tag | string | `"fdb16f4d5c28fef6538d01b07ed2520bc9253809"` | | +| controller.imagePullSecrets | list | `[]` | | +| controller.logLevel | string | `"warning"` | Logging level (debug, info, warning, error, fatal, panic). 
| +| controller.nameOverride | string | `"portshift-agent"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment | +| controller.pdb.create | bool | `true` | | +| controller.pdb.minAvailable | int | `1` | | +| controller.persistence.accessMode | string | `"ReadWriteOnce"` | | +| controller.persistence.enabled | bool | `false` | Enable persistence using Persistent Volume Claims | +| controller.persistence.pvcSuffix | string | `"pvc-fdb16f4d5c28fef6538d01b07ed2520bc9253809"` | | +| controller.persistence.size | string | `"100Mi"` | The storage space that should be claimed from the persistent volume | +| controller.persistence.storageClass | string | `nil` | If defined, storageClassName will be set to the defined storageClass. If set to "-", storageClassName will be set to an empty string (""), which disables dynamic provisioning. If undefined or set to null (the default), no storageClassName spec is set, choosing 'standard' storage class available with the default provisioner (gcd-pd on GKE, hostpath on minikube, etc). | +| controller.podSecurityContext.fsGroup | int | `1001` | | +| controller.replicaCount | int | `1` | Configure controller replica count number in case autoscaling is disabled. | +| controller.resources.requests.cpu | string | `"500m"` | | +| controller.resources.requests.memory | string | `"2048Mi"` | | +| controller.secret.existingSecret | string | `""` | Existing secret that contains shared secret used by the controller to communicate with the SaaS. | +| controller.secret.sharedSecret | string | `""` | [Required if controller.existingSecret isn't set] Shared secret used by the controller to communicate with the SaaS, should be extracted from SaaS after cluster creation. | +| controller.secret.sharedSecretKey | string | `""` | Shared secret key is the key of the shared secret, default: SHARED_SECRET. 
| +| controller.securityContext.allowPrivilegeEscalation | bool | `false` | | +| controller.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| controller.securityContext.privileged | bool | `false` | | +| controller.securityContext.readOnlyRootFilesystem | bool | `true` | | +| controller.securityContext.runAsGroup | int | `1001` | | +| controller.securityContext.runAsNonRoot | bool | `true` | | +| controller.securityContext.runAsUser | int | `1001` | | +| controller.service.type | string | `"ClusterIP"` | | +| controller.serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| controller.serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| controller.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| controller.tolerations | list | `[]` | | +| dnsDetector.image.pullPolicy | string | `"IfNotPresent"` | | +| dnsDetector.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| dnsDetector.image.repository | string | `"gopassivedns"` | | +| dnsDetector.image.tag | string | `"0c7330b51a07cdebe13e57b1d1a33134cbbe04ce"` | | +| dnsDetector.resources.limits.cpu | string | `"200m"` | | +| dnsDetector.resources.limits.memory | string | `"100Mi"` | | +| dnsDetector.resources.requests.cpu | string | `"20m"` | | +| dnsDetector.resources.requests.memory | string | `"50Mi"` | | +| global.autoLabelEnabled | bool | `false` | Indicates whether auto label is enabled. If true, new namespaces will be labeled with the protection label. | +| global.cdValidation | bool | `false` | Indicates whether to identify pods whose templates originated from the Panoptica CD plugin. See `CD Pod template` section in https://panoptica.readme.io/docs/deploy-on-a-kubernetes-cluster for more info. 
| +| global.ciImageSignatureValidation | bool | `false` | Indicates whether to identify pods only if all images are signed by a trusted signer. See https://panoptica.readme.io/docs/trusted-signers for more info. | +| global.ciImageValidation | bool | `false` | Indicates whether to identify pods only if all image hashes are known to Panoptica. See `CI image hash validation` section in https://panoptica.readme.io/docs/deploy-on-a-kubernetes-cluster for more info. | +| global.connectionFailPolicyAllow | bool | `true` | If false, connections on protected namespaces will be blocked if the controller is not responding. | +| global.dummyPlaceHolderForTest | bool | `false` | Placeholder used for tests. | +| global.enableTlsInspection | bool | `false` | Indicates whether TLS inspection is enabled. If true, the controller will be able to decrypt and re-encrypt HTTPS traffic for connections to be inspected via layer 7 attributes. | +| global.environmentFailurePolicyAllow | bool | `true` | If false, pod creation on protected namespaces will be blocked if the controller is not responding. | +| global.extraLabels | object | `{}` | Allow labelling resources with custom key/value pairs. | +| global.httpProxy | string | `""` | Proxy address to use for HTTP request if needed. | +| global.httpsProxy | string | `""` | Proxy address to use for HTTPS request if needed. In most cases, this is the same as `httpProxy`. | +| global.isAPISecurityEnabled | bool | `false` | Indicates whether API security is enabled. | +| global.isConnectionEnforcementEnabled | bool | `false` | Indicates whether connection enforcement is enabled. If true, make sure istio is installed by using panoptica istio chart or an upstream istio is already installed. | +| global.isContainerSecurityEnabled | bool | `true` | Indicates whether kubernetes security is enabled. 
| +| global.isExternalCaEnabled | bool | `false` | Indicates whether istio should provision workload certificates using a custom certificate authority that integrates with the Kubernetes CSR API. | +| global.isOpenShift | bool | `false` | Indicates whether installed in an OpenShift environment. | +| global.isSSHMonitorEnabled | bool | `true` | Indicates whether SSH monitoring is enabled. | +| global.k8sCisBenchmarkEnabled | bool | `true` | Indicates whether K8s CIS benchmark is enabled. | +| global.k8sEventsEnabled | bool | `true` | Indicates whether K8s Events monitoring is enabled. | +| global.kubeVersionOverride | string | `""` | Override detected cluster version. | +| global.mgmtHostname | string | `""` | Panoptica SaaS URL. Used to override default URL for local testing. | +| global.preserveOriginalSourceIp | bool | `false` | Indicates whether the controller should preserve the original source ip of inbound connections. | +| global.productNameOverride | string | `"portshift"` | Override product name. Defaults to chart name. | +| global.registry | string | `"gcr.io/eticloud/k8sec"` | Registry for the Panoptica images. If replaced with a local registry need to make sure all images are pulled into the local registry. | +| global.restrictRegistries | bool | `false` | Indicates whether to identity pods only if all images are pulled from trusted registries. See `Restrict Registries` section in https://panoptica.readme.io/docs/deploy-on-a-kubernetes-cluster for more info. | +| global.sendTelemetriesIntervalSec | int | `30` | Configures telemetry frequency (in seconds) for reporting duration. | +| global.tokenInjectionEnabled | bool | `false` | Indicates whether token injection feature is enabled. If true, make sure vault is installed by using panoptica vault chart. | +| global.validateDeployerPolicy | bool | `false` | Indicates whether Deployer Policy enforcement is enabled. 
| +| imageAnalysis.cisDockerBenchmark.enabled | bool | `false` | | +| imageAnalysis.cisDockerBenchmark.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| imageAnalysis.cisDockerBenchmark.image.repository | string | `"cis-docker-benchmark"` | | +| imageAnalysis.cisDockerBenchmark.image.tag | string | `"a281d02d480ba3fc815d176731fa9412fe872ad3"` | | +| imageAnalysis.cisDockerBenchmark.podSecurityContext.fsGroup | int | `1001` | | +| imageAnalysis.cisDockerBenchmark.resources.limits.cpu | string | `"1000m"` | | +| imageAnalysis.cisDockerBenchmark.resources.limits.memory | string | `"1000Mi"` | | +| imageAnalysis.cisDockerBenchmark.resources.requests.cpu | string | `"50m"` | | +| imageAnalysis.cisDockerBenchmark.resources.requests.memory | string | `"50Mi"` | | +| imageAnalysis.cisDockerBenchmark.securityContext.allowPrivilegeEscalation | bool | `false` | | +| imageAnalysis.cisDockerBenchmark.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| imageAnalysis.cisDockerBenchmark.securityContext.privileged | bool | `false` | | +| imageAnalysis.cisDockerBenchmark.securityContext.readOnlyRootFilesystem | bool | `true` | | +| imageAnalysis.cisDockerBenchmark.securityContext.runAsGroup | int | `1001` | | +| imageAnalysis.cisDockerBenchmark.securityContext.runAsNonRoot | bool | `true` | | +| imageAnalysis.cisDockerBenchmark.securityContext.runAsUser | int | `1001` | | +| imageAnalysis.jobDefaultNamespace | string | `""` | Scanner jobs namespace. If left blank, the scanner jobs will run in release namespace. 
If set, the scanner jobs will run in the given namespace unless the image requires image pull secrets which are located in a target pod | +| imageAnalysis.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment | +| imageAnalysis.parallelScanners | int | `4` | The max number of scanner jobs that will run in the cluster in parallel for image analysis in total | +| imageAnalysis.registry.skipVerifyTlS | string | `"false"` | | +| imageAnalysis.registry.useHTTP | string | `"false"` | | +| imageAnalysis.sbom.enabled | bool | `true` | | +| imageAnalysis.sbom.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| imageAnalysis.sbom.image.repository | string | `"image-analyzer"` | | +| imageAnalysis.sbom.image.tag | string | `"5f969c4535b52368ff7e288f6c9a2ce8bea019b0"` | | +| imageAnalysis.sbom.podSecurityContext.fsGroup | int | `1001` | | +| imageAnalysis.sbom.resources.limits.cpu | string | `"1000m"` | | +| imageAnalysis.sbom.resources.limits.memory | string | `"2000Mi"` | | +| imageAnalysis.sbom.resources.requests.cpu | string | `"50m"` | | +| imageAnalysis.sbom.resources.requests.memory | string | `"50Mi"` | | +| imageAnalysis.sbom.securityContext.allowPrivilegeEscalation | bool | `false` | | +| imageAnalysis.sbom.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| imageAnalysis.sbom.securityContext.privileged | bool | `false` | | +| imageAnalysis.sbom.securityContext.readOnlyRootFilesystem | bool | `true` | | +| imageAnalysis.sbom.securityContext.runAsGroup | int | `1001` | | +| imageAnalysis.sbom.securityContext.runAsNonRoot | bool | `true` | | +| imageAnalysis.sbom.securityContext.runAsUser | int | `1001` | | +| imageAnalysis.tolerations | list | `[]` | | +| istio.expansion.enabled | bool | `false` | | +| istio.global.alreadyInstalled | bool | `false` | Indicates whether istio is already installed and not by Panoptica charts. 
| +| istio.global.serviceDiscoveryIsolationEnabled | bool | `false` | | +| istio.global.version | string | `"1.19.0"` | Indicates what version of istio is running, change only if `alreadyInstalled` is set to true. | +| k8sCISBenchmark.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| k8sCISBenchmark.image.repository | string | `"k8s-cis-benchmark"` | | +| k8sCISBenchmark.image.tag | string | `"f5b0490258b1cb87ce6eddc2a3083482135dcf5c"` | | +| k8sCISBenchmark.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment | +| kafkaAuthzInjector.image.pullPolicy | string | `"Always"` | | +| kafkaAuthzInjector.image.registry | string | `""` | Image registry, used to override global.registry if needed. | +| kafkaAuthzInjector.image.repository | string | `"kafka-authz"` | | +| kafkaAuthzInjector.image.tag | string | `"e647ba66cf10897ee6e07a3d6d81b2148d0a47be"` | | +| kafkaAuthzInjector.securityContext.allowPrivilegeEscalation | bool | `false` | | +| kafkaAuthzInjector.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| kafkaAuthzInjector.securityContext.privileged | bool | `false` | | +| kafkaAuthzInjector.securityContext.readOnlyRootFilesystem | bool | `true` | | +| kafkaAuthzInjector.securityContext.runAsGroup | int | `1001` | | +| kafkaAuthzInjector.securityContext.runAsNonRoot | bool | `true` | | +| kafkaAuthzInjector.securityContext.runAsUser | int | `1001` | | +| kubectl.image.pullPolicy | string | `"IfNotPresent"` | | +| kubectl.image.registry | string | `""` | Image registry, used to override global.registry if needed. 
| +| kubectl.image.repository | string | `"kubectl"` | | +| kubectl.image.tag | string | `"v1.27.1"` | | +| seccompInstaller.serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| seccompInstaller.serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| seccompInstaller.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| upgrader.image.pullPolicy | string | `"Always"` | | +| upgrader.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment | +| upgrader.podSecurityContext.fsGroup | int | `1001` | | +| upgrader.resources.limits.cpu | string | `"1000m"` | | +| upgrader.resources.limits.memory | string | `"1000Mi"` | | +| upgrader.resources.requests.cpu | string | `"50m"` | | +| upgrader.resources.requests.memory | string | `"50Mi"` | | +| upgrader.securityContext.allowPrivilegeEscalation | bool | `false` | | +| upgrader.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| upgrader.securityContext.privileged | bool | `false` | | +| upgrader.securityContext.readOnlyRootFilesystem | bool | `true` | | +| upgrader.securityContext.runAsGroup | int | `1001` | | +| upgrader.securityContext.runAsNonRoot | bool | `true` | | +| upgrader.securityContext.runAsUser | int | `1001` | | +| upgrader.tolerations | list | `[]` | | +| vaultEnv.image.registry | string | `""` | Image registry, used to override global.registry if needed. 
| +| vaultEnv.image.repository | string | `"bank-vaults/vault-env"` | | +| vaultEnv.image.tag | string | `"v1.21.0"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/Chart.yaml new file mode 100644 index 00000000..c6117503 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 14.4.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.x.x +description: PostgreSQL (Postgres) is an open source object-relational database known + for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, + views, triggers and stored procedures. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +- https://www.postgresql.org/ +version: 11.6.12 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/README.md b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/README.md new file mode 100644 index 00000000..ec97a8fa --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/README.md @@ -0,0 +1,672 @@ + + +# PostgreSQL packaged by Bitnami + +PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. + +[Overview of PostgreSQL](http://www.postgresql.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
+ +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```bash +kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` | +| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` | +| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` | +| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` | +| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` | +| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` | +| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` | +| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. 
| `""` | +| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### PostgreSQL common parameters + +| Name | Description | Value | +| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | PostgreSQL image registry | `docker.io` | +| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` | +| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `14.4.0-debian-11-r4` | +| `image.pullPolicy` | 
PostgreSQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` | +| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided | `""` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided | `""` | +| `auth.database` | Name for a custom database to create | `""` | +| `auth.replicationUsername` | Name of the replication user | `repl_user` | +| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided | `""` | +| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. | `""` | +| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` | +| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` | +| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. 
| `replication-password` | +| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` | +| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `containerPorts.postgresql` | PostgreSQL container port | `5432` | +| `audit.logHostname` | Log client hostnames | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` | +| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` | +| `audit.clientMinMessages` | Message log level to share with the user | `error` | +| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` | +| `audit.logTimezone` | Timezone for the log timestamps | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.server` | IP address or name of the LDAP server. 
| `""` |
+| `ldap.port`                              | Port number on the LDAP server to connect to                                                                                | `""`                       |
+| `ldap.prefix`                            | String to prepend to the user name when forming the DN to bind                                                              | `""`                       |
+| `ldap.suffix`                            | String to append to the user name when forming the DN to bind                                                               | `""`                       |
+| `ldap.basedn`                            | Root DN to begin the search for the user in                                                                                 | `""`                       |
+| `ldap.binddn`                            | DN of user to bind to LDAP                                                                                                  | `""`                       |
+| `ldap.bindpw`                            | Password for the user to bind to LDAP                                                                                       | `""`                       |
+| `ldap.searchAttribute`                   | Attribute to match against the user name in the search                                                                      | `""`                       |
+| `ldap.searchFilter`                      | The search filter to use when doing search+bind authentication                                                              | `""`                       |
+| `ldap.scheme`                            | Set to `ldaps` to use LDAPS                                                                                                 | `""`                       |
+| `ldap.tls.enabled`                       | Set to true to enable TLS encryption                                                                                        | `false`                    |
+| `ldap.uri`                               | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored.  | `""`                       |
+| `postgresqlDataDir`                      | PostgreSQL data dir folder                                                                                                  | `/bitnami/postgresql/data` |
+| `postgresqlSharedPreloadLibraries`       | Shared preload libraries (comma-separated list)                                                                             | `pgaudit`                  |
+| `shmVolume.enabled`                      | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s)                                                                   | `true`                     |
+| `shmVolume.sizeLimit`                    | Set this to enable a size limit on the shm tmpfs                                                                            | `""`                       |
+| `tls.enabled`                            | Enable TLS traffic support                                                                                                  | `false`                    |
+| `tls.autoGenerated`                      | Generate automatically self-signed TLS certificates                                                                         | `false`                    |
+| `tls.preferServerCiphers`                | Whether to use the server's TLS cipher preferences rather than the client's                                                 | `true`                     |
+| `tls.certificatesSecret`                 | Name of an existing secret that contains the certificates                                                                   | `""`                       |
+| `tls.certFilename`                       | Certificate filename                                                                                                        | `""`                       |
+| `tls.certKeyFilename`                    | Certificate key filename                                                                                                    | `""`                       |
+| `tls.certCAFilename`                     | CA Certificate filename                                                                                                     | `""`                       |
+| `tls.crlFilename`                        | File containing a Certificate Revocation List                                                                               | `""`                       |
+
+
+### PostgreSQL Primary parameters
+
+| Name                                         | Description                                                                                                              | Value                 |
+| 
-------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` | +| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` | +| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` | +| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` | +| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` | +| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` | +| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` | +| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` | +| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` | +| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` | +| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` | +| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` | +| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` | +| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` | +| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` | +| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` | +| 
`primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.command` | Override default container command (useful when using custom images) | `[]` | +| `primary.args` | Override default container args (useful when using custom images) | `[]` | +| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| 
`primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` | +| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` | +| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` | +| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` | +| `primary.podSecurityContext.enabled` | Enable security context | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `primary.containerSecurityContext.enabled` | Enable container security context | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` | +| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` | +| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` | +| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` | +| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` | +| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` |
+| `primary.nodeAffinityPreset.type`            | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard`     | `""`                  |
+| `primary.nodeAffinityPreset.key`             | PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set.                                        | `""`                  |
+| `primary.nodeAffinityPreset.values`          | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set.                                     | `[]`                  |
+| `primary.affinity`                           | Affinity for PostgreSQL primary pods assignment                                                                          | `{}`                  |
+| `primary.nodeSelector`                       | Node labels for PostgreSQL primary pods assignment                                                                       | `{}`                  |
+| `primary.tolerations`                        | Tolerations for PostgreSQL primary pods assignment                                                                       | `[]`                  |
+| `primary.topologySpreadConstraints`          | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]`                  |
+| `primary.priorityClassName`                  | Priority Class to use for each pod (postgresql primary)                                                                  | `""`                  |
+| `primary.schedulerName`                      | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` | +| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` | +| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` | +| `primary.service.type` | Kubernetes Service type | `ClusterIP` | +| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` | +| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` | +| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `primary.persistence.enabled` | Enable PostgreSQL Primary 
data persistence using PVC | `true` | +| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` | +| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `primary.persistence.annotations` | Annotations for the PVC | `{}` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `primary.persistence.dataSource` | Custom PVC data source | `{}` | + + +### PostgreSQL read only replica parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` | +| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` | +| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` | +| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` | +| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay 
seconds for livenessProbe | `30` | +| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` | +| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup 
| `{}` | +| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` | +| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` | +| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` | +| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` | +| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` | +| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` | +| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` | +| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` | +| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` | +| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` | +| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` | +| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. | `""` | +| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` | +| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` | +| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` | +| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` | +| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` | +| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` | +| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` | +| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` | +| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` | +| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` | 
+| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` | +| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` | +| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` | +| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` | +| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` | +| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` | + + +### NetworkPolicy parameters + +| Name | Description | Value | +| 
------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable network policies | `false` | +| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` | +| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` | +| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. | `{}` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). | `{}` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). | `{}` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). 
| `{}` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). | `{}` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` | +| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r9` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------- | --------------------------- | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.10.1-debian-11-r9` | +| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | +| `metrics.customMetrics` | Define additional custom metrics | `{}` | +| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` | +| 
`metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | 
Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` | +| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` | +| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` | +| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.postgresPassword=secretpassword + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Customizing primary and read replica services in a replicated configuration + +At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. This allows you to override the values in the top level service object so that the primary and read can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the primary and read to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the primary.service or readReplicas.service objects will take precedence over the top level service object. + +### Use a different PostgreSQL version + +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/). 
+ +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration` parameter as a string. Alternatively, to replace the entire default configuration use `primary.configuration`. + +You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string. + +In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. 
+ +For example: + +- First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +- Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL primary +primary: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +readReplicas: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. 
See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.auth.username=testuser +subchart1.postgresql.auth.username=testuser +subchart2.postgresql.auth.username=testuser +postgresql.auth.password=testpass +subchart1.postgresql.auth.password=testpass +subchart2.postgresql.auth.password=testpass +postgresql.auth.database=testdb +subchart1.postgresql.auth.database=testdb +subchart2.postgresql.auth.database=testdb +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.auth.username=testuser +global.postgresql.auth.password=testpass +global.postgresql.auth.database=testdb +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. 
This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please covert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```bash +kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. 
This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/). + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/.helmignore b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/Chart.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/Chart.yaml new file mode 100644 index 00000000..bd152e31 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.16.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.16.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/README.md b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/README.md new file mode 100644 index 00000000..3b5e09c5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/README.md @@ -0,0 +1,350 @@ +# Bitnami Common 
Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. 
| `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and 
`svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. 
| `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. 
It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. 
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command.
To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_affinities.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..189ea403 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_capabilities.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..9d9b7600 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. 
+*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_errors.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_errors.tpl
new file mode 100644
index 00000000..a79cc2e3
--- /dev/null
+++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned, if it is empty it won't throw error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release."
-}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_images.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 00000000..42ffbc72 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list 
.Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_ingress.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..8caf73a6 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . 
}} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_labels.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..252066c7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_names.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 00000000..1bdac8b7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_secrets.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a53fb44f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. 
Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + 
{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_storage.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..60e2a844 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- 
$storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_tplvalues.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..2db16685 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_utils.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..8c22b2a3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_warnings.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..ae10fa41 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. 
+Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_cassandra.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..ded1ae3b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mariadb.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..b6906ff7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mongodb.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..f820ec10 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mysql.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 00000000..74472a06 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_postgresql.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..164ec0d0 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. 
+ +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. 
Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_redis.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..dcccfc1a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_validations.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..9a814cf4 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/values.yaml new file mode 100644 index 00000000..f2df68e5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/NOTES.txt b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/NOTES.txt new file mode 100644 index 00000000..e0474d4b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,89 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh + +{{- else }} + +PostgreSQL can be accessed via port {{ include "postgresql.service.port" . }} on the following DNS names from within your cluster: + + {{ include "postgresql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection + +{{- if eq .Values.architecture "replication" }} + + {{ include "postgresql.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection + +{{- end }} + +{{- $customUser := include "postgresql.username" . }} +{{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.postgres-password}" | base64 -d) + +To get the password for "{{ $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . 
}} -o jsonpath="{.data.password}" | base64 -d) + +{{- else }} + +To get the password for "{{ default "postgres" $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 -d) + +{{- end }} + +To connect to your database run the following command: + + kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \ + --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }} + + > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }}} does not exist" + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.primary.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" . }}) + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . 
}}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.primary.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.primary.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . }} & + PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }} + +{{- end }} +{{- end }} + +{{- include "postgresql.validateValues" . 
-}} +{{- include "common.warnings.rollingTag" .Values.image -}} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 00000000..ae11d320 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,382 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Create a default fully qualified app name for PostgreSQL Primary objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} + {{- printf "%s-primary" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "common.names.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name for PostgreSQL read-only replicas objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.readReplica.fullname" -}} +{{- printf "%s-read" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL primary headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.primary.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.primary.fullname" .) 
| trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL read-only replicas headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.readReplica.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.readReplica.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the name for a custom user to create +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.auth.username }} + {{- .Values.global.postgresql.auth.username -}} +{{- else -}} + {{- .Values.auth.username -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name for a custom database to create +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.auth.database }} + {{- .Values.global.postgresql.auth.database -}} +{{- else if .Values.auth.database -}} + {{- .Values.auth.database -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.auth.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}} +{{- else if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the replication-password key. +*/}} +{{- define "postgresql.replicationPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }} + {{- if .Values.global.postgresql.auth.secretKeys.replicationPasswordKey }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.replicationPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.replicationPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.replicationPasswordKey $) -}} + {{- else -}} + {{- "replication-password" -}} + {{- end -}} +{{- else -}} + {{- "replication-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the admin-password key. +*/}} +{{- define "postgresql.adminPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }} + {{- if .Values.global.postgresql.auth.secretKeys.adminPasswordKey }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.adminPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.adminPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.adminPasswordKey $) -}} + {{- end -}} +{{- else -}} + {{- "postgres-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the user-password key. +*/}} +{{- define "postgresql.userPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }} + {{- if or (empty (include "postgresql.username" .)) (eq (include "postgresql.username" .) "postgres") }} + {{- printf "%s" (include "postgresql.adminPasswordKey" .) 
-}} + {{- else -}} + {{- if .Values.global.postgresql.auth.secretKeys.userPasswordKey }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.userPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.userPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.userPasswordKey $) -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- ternary "password" "postgres-password" (and (not (empty (include "postgresql.username" .))) (ne (include "postgresql.username" .) "postgres")) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql }} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.primary.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.readReplica.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql }} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.readReplicas.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary configuration ConfigMap name. +*/}} +{{- define "postgresql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "postgresql.primary.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the configuration +*/}} +{{- define "postgresql.primary.createConfigmap" -}} +{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary extended configuration ConfigMap name. +*/}} +{{- define "postgresql.primary.extendedConfigmapName" -}} +{{- if .Values.primary.existingExtendedConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}} +{{- else -}} + {{- printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the extended configuration +*/}} +{{- define "postgresql.primary.createExtendedConfigmap" -}} +{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "postgresql.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. 
+*/}} +{{- define "postgresql.initdb.scriptsCM" -}} +{{- if .Values.primary.initdb.scriptsConfigMap -}} + {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}} +{{- else -}} + {{- printf "%s-init-scripts" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if TLS is enabled for LDAP connection +*/}} +{{- define "postgresql.ldap.tls.enabled" -}} +{{- if and (kindIs "string" .Values.ldap.tls) (not (empty .Values.ldap.tls)) }} + {{- true -}} +{{- else if and (kindIs "map" .Values.ldap.tls) .Values.ldap.tls.enabled }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +{{- $customUser := include "postgresql.username" . }} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- else }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}} +{{- else -}} + {{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}} +{{- else -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. 
+*/}} +{{- define "postgresql.tlsCACert" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}} +{{- else -}} + {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "postgresql.createTlsSecret" -}} +{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsSecretName" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "%s-crt" (include "common.names.fullname" .) -}} +{{- else -}} + {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} +{{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/extra-list.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/extra-list.yaml new file mode 100644 index 00000000..9ac65f9e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/networkpolicy-egress.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/networkpolicy-egress.yaml new file mode 100644 index 00000000..e8621474 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/networkpolicy-egress.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + policyTypes: + - Egress + egress: + {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - namespaceSelector: {} + {{- end }} + {{- if .Values.networkPolicy.egressRules.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/configmap.yaml new file mode 100644 index 00000000..d654a225 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/configmap.yaml @@ -0,0 +1,24 @@ +{{- if (include "postgresql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.primary.configuration }} + postgresql.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.pgHbaConfiguration }} + pg_hba.conf: | + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/extended-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/extended-configmap.yaml new file mode 100644 index 00000000..d129bd3b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/extended-configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "postgresql.primary.createExtendedConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + override.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/initialization-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/initialization-configmap.yaml new file mode 100644 index 00000000..d3d26cb8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) 
| nindent 2 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/metrics-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/metrics-configmap.yaml new file mode 100644 index 00000000..8ad2f35f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/metrics-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/metrics-svc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/metrics-svc.yaml new file mode 100644 index 00000000..75a1b81b --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/networkpolicy.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/networkpolicy.yaml new file mode 100644 index 00000000..ce0052d4 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/networkpolicy.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-ingress" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: primary + ingress: + {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }} + - from: + {{- if .Values.networkPolicy.metrics.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.metrics.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.metrics.containerPorts.metrics }} + {{- end }} + {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }} + - from: + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }} + - from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 14 }} + app.kubernetes.io/component: read + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/servicemonitor.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/servicemonitor.yaml new file mode 100644 index 00000000..c4a19fe0 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} +{{- end }} diff --git 
a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/statefulset.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/statefulset.yaml new file mode 100644 index 00000000..a1268821 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/statefulset.yaml @@ -0,0 +1,642 @@ +{{- $customUser := include "postgresql.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.labels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + serviceName: {{ include "postgresql.primary.svc.headless" . }} + {{- if .Values.primary.updateStrategy }} + updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: primary + template: + metadata: + name: {{ include "postgresql.primary.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "postgresql.primary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "postgresql.primary.createExtendedConfigmap" .) }} + checksum/extended-configuration: {{ include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.primary.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.serviceAccountName" . }} + {{- include "postgresql.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.primary.priorityClassName }} + priorityClassName: {{ .Values.primary.priorityClassName }} + {{- end }} + {{- if .Values.primary.schedulerName }} + schedulerName: {{ .Values.primary.schedulerName | quote }} + {{- end }} + {{- if .Values.primary.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.primary.hostNetwork }} + hostIPC: {{ .Values.primary.hostIPC }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.tlsCertKey" . }} + volumeMounts: + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.primary.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }} + {{- else }} + chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.primary.persistence.enabled }} + - name: data + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | 
nindent 8 }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.primary.persistence.mountPath | quote }} + {{- if .Values.primary.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if and (not (empty $customUser)) (ne $customUser "postgres") }} + - name: POSTGRES_USER + value: {{ $customUser | quote }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.adminPasswordKey" . 
}} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . }} + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + # Replication + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }} + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.replicationPasswordKey" . 
}} + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off") }} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + # Initdb + {{- if .Values.primary.initdb.args }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.primary.initdb.args | quote }} + {{- end }} + {{- if .Values.primary.initdb.postgresqlWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.primary.initdb.postgresqlWalDir | quote }} + {{- end }} + {{- if .Values.primary.initdb.user }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.primary.initdb.user }} + {{- end }} + {{- if .Values.primary.initdb.password }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.primary.initdb.password | quote }} + {{- end }} + # Standby + {{- if .Values.primary.standby.enabled }} + - name: POSTGRES_MASTER_HOST + value: {{ .Values.primary.standby.primaryHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.primary.standby.primaryPort | quote }} + {{- end }} + # LDAP + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + {{- if or .Values.ldap.url .Values.ldap.uri }} + - name: POSTGRESQL_LDAP_URL + value: {{ coalesce .Values.ldap.url .Values.ldap.uri }} + {{- else }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if (include "postgresql.ldap.tls.enabled" .) 
}} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ coalesce .Values.ldap.baseDN .Values.ldap.basedn }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ coalesce .Values.ldap.bindDN .Values.ldap.binddn}} + {{- if or (not (empty .Values.ldap.bind_password)) (not (empty .Values.ldap.bindpw)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: ldap-password + {{- end }} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ coalesce .Values.ldap.search_attr .Values.ldap.searchAttribute }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ coalesce .Values.ldap.search_filter .Values.ldap.searchFilter }} + {{- end }} + {{- end }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) 
}} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- else if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . 
}}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- else if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + {{- else if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + {{- if .Values.primary.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if 
.Values.primary.persistence.enabled }} + - name: data + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.customMetrics }} + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.service.port" .)) (default "postgres" $customUser) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) 
}} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.service.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . }} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- else if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- else if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe 
"enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- else if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + configMap: + name: {{ include "postgresql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.primary.extendedConfigmapName" . }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.secretName" . }} + {{- end }} + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + configMap: + name: {{ include "postgresql.initdb.scriptsCM" . 
}} + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim $ }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.primary.persistence.annotations }} + annotations: {{- toYaml .Values.primary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + {{- if .Values.primary.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }} + {{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/svc-headless.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/svc-headless.yaml new file mode 100644 index 00000000..b7826318 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/svc-headless.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.primary.svc.headless" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: primary + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ template "postgresql.service.port" . }} + targetPort: tcp-postgresql + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/svc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/svc.yaml new file mode 100644 index 00000000..cf184809 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/primary/svc.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.primary.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: primary + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if .Values.primary.service.sessionAffinity }} + sessionAffinity: {{ .Values.primary.service.sessionAffinity }} + {{- end }} + {{- if .Values.primary.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.service.port" . 
}} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.primary.service.nodePorts.postgresql }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.primary.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/prometheusrule.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 00000000..24be7100 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/psp.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/psp.yaml new file mode 100644 index 00000000..48d11754 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/psp.yaml @@ -0,0 +1,41 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.psp.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/metrics-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/metrics-configmap.yaml new file mode 100644 index 00000000..ddaae75b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/metrics-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/metrics-svc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/metrics-svc.yaml new file mode 100644 index 00000000..846d853a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/networkpolicy.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/networkpolicy.yaml new file mode 100644 index 00000000..c969cd7a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-ingress" (include "postgresql.readReplica.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: read + ingress: + {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }} + - from: + {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/servicemonitor.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/servicemonitor.yaml new file mode 100644 index 00000000..aa06b073 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/statefulset.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/statefulset.yaml new file mode 100644 index 00000000..fa986c10 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/statefulset.yaml @@ -0,0 +1,523 @@ +{{- if eq .Values.architecture "replication" }} +{{- $customUser := include "postgresql.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . 
}} +kind: StatefulSet +metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.labels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.readReplicas.replicaCount }} + serviceName: {{ include "postgresql.readReplica.svc.headless" . }} + {{- if .Values.readReplicas.updateStrategy }} + updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: read + template: + metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.readReplicas.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.readReplicas.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.serviceAccountName" . }} + {{- include "postgresql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.readReplicas.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.readReplicas.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }} + 
{{- end }} + {{- if .Values.readReplicas.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.priorityClassName }} + priorityClassName: {{ .Values.readReplicas.priorityClassName }} + {{- end }} + {{- if .Values.readReplicas.schedulerName }} + schedulerName: {{ .Values.readReplicas.schedulerName | quote }} + {{- end }} + {{- if .Values.readReplicas.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.readReplicas.podSecurityContext.enabled }} + securityContext: {{- omit .Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.readReplicas.hostNetwork }} + hostIPC: {{ .Values.readReplicas.hostIPC }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.readReplicas.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.tlsCertKey" . 
}} + volumeMounts: + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.readReplicas.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }} + {{- else }} + chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }} + find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{ if .Values.readReplicas.persistence.enabled }} + - name: data + mountPath: {{ .Values.readReplicas.persistence.mountPath }} + {{- if .Values.readReplicas.persistence.subPath }} + subPath: {{ .Values.readReplicas.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.readReplicas.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" 
.Values.readReplicas.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.readReplicas.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.readReplicas.persistence.mountPath | quote }} + {{- if .Values.readReplicas.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.adminPasswordKey" . 
}} + {{- end }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . }} + {{- end }} + # Replication + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.replicationPasswordKey" . }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ include "postgresql.primary.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.service.port" . | quote }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.readReplicas.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }} + envFrom: + {{- if .Values.readReplicas.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.readReplicas.extraEnvVarsCM }} + {{- end }} + {{- if .Values.readReplicas.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.readReplicas.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.readReplicas.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) 
}} + - exec pg_isready -U {{ default "postgres" $customUser| quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- else if .Values.readReplicas.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readReplicas.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . 
}}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- else if .Values.readReplicas.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readReplicas.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + {{- else if .Values.readReplicas.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- end }} + {{- if .Values.readReplicas.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.readReplicas.persistence.enabled }} + - name: data + mountPath: {{ .Values.readReplicas.persistence.mountPath }} + {{- if .Values.readReplicas.persistence.subPath }} + subPath: {{ .Values.readReplicas.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if 
.Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.customMetrics }} + args: [ "--extend.query-path", "/conf/custom-metrics.yaml" ] + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.service.port" .)) (default "postgres" $customUser | quote) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.service.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . 
}} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- else if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- else if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- else if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- 
end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.secretName" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.readReplicas.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.readReplicas.persistence.annotations }} + annotations: {{- toYaml .Values.readReplicas.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.readReplicas.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + {{- if .Values.readReplicas.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.readReplicas.persistence.size | quote }} + {{- if .Values.readReplicas.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/svc-headless.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/svc-headless.yaml new file mode 100644 index 00000000..0371e49d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/svc-headless.yaml @@ -0,0 +1,33 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.readReplica.svc.headless" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: read + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ include "postgresql.readReplica.service.port" . }} + targetPort: tcp-postgresql + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/svc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/svc.yaml new file mode 100644 index 00000000..3eece4db --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/read/svc.yaml @@ -0,0 +1,53 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.readReplica.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: read + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.readReplicas.service.type }} + {{- if or (eq .Values.readReplicas.service.type "LoadBalancer") (eq .Values.readReplicas.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.readReplicas.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.readReplicas.service.clusterIP (eq .Values.readReplicas.service.type "ClusterIP") }} + clusterIP: {{ .Values.readReplicas.service.clusterIP }} + {{- end }} + {{- if .Values.readReplicas.service.sessionAffinity }} + sessionAffinity: {{ .Values.readReplicas.service.sessionAffinity }} + {{- end }} + {{- if .Values.readReplicas.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ include 
"postgresql.readReplica.service.port" . }} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }} + {{- else if eq .Values.readReplicas.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.readReplicas.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/role.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/role.yaml new file mode 100644 index 00000000..00f92223 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/role.yaml @@ -0,0 +1,31 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +# yamllint disable rule:indentation +rules: + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.psp.create }} + - apiGroups: + - 'policy' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: + - {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +# yamllint enable rule:indentation +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/rolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/rolebinding.yaml new file mode 100644 index 00000000..0311c0ec --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "postgresql.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/secrets.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/secrets.yaml new file mode 100644 index 00000000..5f28fb37 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.auth.enablePostgresUser }} + postgres-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "postgres-password" "providedValues" (list "global.postgresql.auth.postgresPassword" "auth.postgresPassword") "context" $) }} + {{- end }} + {{- if not (empty (include "postgresql.username" .)) }} + password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "password" "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) }} + {{- end }} + {{- if eq .Values.architecture "replication" }} + replication-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "replication-password" "providedValues" (list "auth.replicationPassword") "context" $) }} + {{- end }} + # We don't auto-generate LDAP password when it's not provided as we do for other passwords + {{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }} + ldap-password: {{ coalesce .Values.ldap.bind_password .Values.ldap.bindpw | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 00000000..179f8f2e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "postgresql.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/tls-secrets.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/tls-secrets.yaml new file mode 100644 index 00000000..59c57764 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/templates/tls-secrets.yaml @@ -0,0 +1,27 @@ +{{- if (include "postgresql.createTlsSecret" . ) }} +{{- $ca := genCA "postgresql-ca" 365 }} +{{- $fullname := include "common.names.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $primaryHeadlessServiceName := include "postgresql.primary.svc.headless" . }} +{{- $readHeadlessServiceName := include "postgresql.readReplica.svc.headless" . 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/values.schema.json b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/values.schema.json new file mode 100644 index 00000000..fc41483c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", 
+ "title": "PostgreSQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enablePostgresUser": { + "type": "boolean", + "title": "Enable \"postgres\" admin user", + "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user", + "form": true + }, + "postgresPassword": { + "type": "string", + "title": "Password for the \"postgres\" admin user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "database": { + "type": "string", + "title": "PostgreSQL custom database", + "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL", + "form": true + }, + "username": { + "type": "string", + "title": "PostgreSQL custom user", + "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. 
This user only has permissions on the PostgreSQL custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for the custom user to create", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "replicationUsername": { + "type": "string", + "title": "PostgreSQL replication user", + "description": "Name of user used to manage replication.", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "replicationPassword": { + "type": "string", + "title": "Password for PostgreSQL replication user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "readReplicas": { + "type": "integer", + "title": "read Replicas", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "volumePermissions": { + "type": "object", + 
"properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/values.yaml new file mode 100644 index 00000000..0bddba63 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/charts/postgresql/values.yaml @@ -0,0 +1,1374 @@ +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value +## +global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.storageClass Global StorageClass for Persistent Volume(s) + ## + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). + ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. 
+ ## + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry PostgreSQL image registry +## @param image.repository PostgreSQL image repository +## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param 
image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 14.4.0-debian-11-r4 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. 
Ignored if `auth.existingSecret` with key `replication-password` is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. + ## + existingSecret: "" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-out operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## DEPRECATED ldap.url It will be removed in a future release, please use 'ldap.uri' instead +## @param ldap.server IP address or name of the LDAP server. 
+## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will be removed in a future release, please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will be removed in a future release, please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will be removed in a future release, please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will be removed in a future release, please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will be removed in a future release, please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Set to true to enable TLS encryption +## +ldap: + enabled: false + server: "" + port: "" + prefix: "" + suffix: "" + basedn: "" + binddn: "" + bindpw: "" + searchAttribute: "" + searchFilter: "" + scheme: "" + tls: + enabled: false + ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. + ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html + uri: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. 
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" + +## @section PostgreSQL Primary parameters +## +primary: + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## 
pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." 
+ ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default 
container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for 
startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 
1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. 
+ ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between 
<30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## PostgreSQL Primary 
persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section PostgreSQL read only replica parameters +## +readReplicas: + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param 
readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param 
readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security 
Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set.
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: readReplicas.podAffinityPreset, readReplicas.podAntiAffinityPreset, and readReplicas.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 
5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## 
sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## PostgreSQL read only persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section NetworkPolicy parameters + +## Add networkpolicies +## +networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] 
Monitoring namespace selector labels. These labels will be used to identify the Prometheus namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. + ## + metrics: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## Ingress Rules + ## + ingressRules: + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node. + ## + primaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s). 
+ ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes. + ## + readReplicasAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + # Deny connections to external. This is not compatible with an external database. 
+ denyConnectionsToExternal: false + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r9 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters + +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false + +## @section Metrics Parameters + +metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false + ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository + ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.10.1-debian-11-r9 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold 
Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## 
@param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/_helpers.tpl new file mode 100644 index 00000000..7c75d5de --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "panoptica.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the product name. +*/}} +{{- define "product.name" -}} +{{- default .Chart.Name .Values.global.productNameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the target Kubernetes version. +Ref https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_capabilities.tpl +*/}} +{{- define "panoptica.capabilities.kubeVersion" -}} +{{- default .Capabilities.KubeVersion.Version .Values.global.kubeVersionOverride -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "panoptica.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "panoptica.capabilities.kubeVersion" .) -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "panoptica.capabilities.pdb.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "panoptica.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/_helpers.tpl new file mode 100644 index 00000000..50d07167 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/_helpers.tpl @@ -0,0 +1,44 @@ +{{- define "apiclarity.name" -}} +{{- "apiclarity" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+*/}} +{{- define "apiclarity.fullname" -}} +{{- "apiclarity" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "apiclarity.labels" -}} +helm.sh/chart: {{ include "panoptica.chart" . }} +{{ include "apiclarity.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.global.extraLabels }} +{{ toYaml $.Values.global.extraLabels }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "apiclarity.selectorLabels" -}} +app.kubernetes.io/name: {{ include "apiclarity.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "apiclarity.serviceAccountName" -}} +{{- if .Values.apiclarity.serviceAccount.create }} +{{- default (include "apiclarity.fullname" .) .Values.apiclarity.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.apiclarity.serviceAccount.name }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/clusterrole.yaml new file mode 100644 index 00000000..a7d9099e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "apiclarity.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: ["nodes", "services", "pods"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["replicasets", "daemonsets", "deployments"] + verbs: ["get", "list", "watch"] + - apiGroups: [ "batch" ] + resources: [ "jobs" ] + verbs: [ "create", "get", "delete" ] +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/clusterrolebinding.yaml new file mode 100644 index 00000000..50353a86 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "apiclarity.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "apiclarity.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "apiclarity.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/deployment.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/deployment.yaml new file mode 100644 index 00000000..c63d13df --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/deployment.yaml @@ -0,0 +1,165 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "apiclarity.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "apiclarity.selectorLabels" . | nindent 6 }} + strategy: + type: Recreate + template: + metadata: + {{- if and .Values.global.isConnectionEnforcementEnabled (not .Values.istio.global.alreadyInstalled) }} + annotations: + traffic.sidecar.istio.io/includeOutboundIPRanges: "" + traffic.sidecar.istio.io/includeInboundPorts: "9000" + proxy.istio.io/config: | + holdApplicationUntilProxyStarts: true + proxyMetadata: + ISTIO_META_DNS_CAPTURE: "false" + ISTIO_META_INSECURE_STACKDRIVER_ENDPOINT: "" + TLS_INTERCEPTION: "false" + {{- end }} + labels: + {{- include "apiclarity.selectorLabels" . | nindent 8 }} + {{- if and .Values.global.isConnectionEnforcementEnabled (not .Values.istio.global.alreadyInstalled) }} + sidecar.istio.io/inject: "true" + {{- end }} + spec: + {{- with .Values.apiclarity.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "apiclarity.serviceAccountName" . 
}} + initContainers: + - name: apiclarity-wait-for-db + image: "{{ index .Values "apiclarity-postgresql" "image" "registry" }}/{{ index .Values "apiclarity-postgresql" "image" "repository" }}:{{ index .Values "apiclarity-postgresql" "image" "tag" }}" + imagePullPolicy: {{ index .Values "apiclarity-postgresql" "image" "pullPolicy" }} + command: ['sh', '-c', 'until pg_isready -h {{ include "apiclarity.fullname" . }}-postgresql -p 5432 -U "postgres" -d "dbname={{ index .Values "apiclarity-postgresql" "auth" "database" }}"; + do echo waiting for database; sleep 2; done;'] + securityContext: + runAsUser: 1001 + resources: + limits: + cpu: 200m + memory: 1000Mi + requests: + cpu: 100m + memory: 200Mi + containers: + - name: apiclarity + image: "{{ default .Values.global.registry .Values.apiclarity.image.registry }}/{{ .Values.apiclarity.image.repository }}:{{ .Values.apiclarity.image.tag }}" + imagePullPolicy: {{ .Values.apiclarity.image.pullPolicy }} + args: + - run + - --log-level + - {{ .Values.apiclarity.logLevel }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + # space separated list of response headers to ignore when reconstructing the spec + - name: RESPONSE_HEADERS_TO_IGNORE + valueFrom: + configMapKeyRef: + name: '{{ include "apiclarity.fullname" . }}-headers-to-ignore-configmap' + key: response.headers + # space separated list of request headers to ignore when reconstructing the spec + - name: REQUEST_HEADERS_TO_IGNORE + valueFrom: + configMapKeyRef: + name: '{{ include "apiclarity.fullname" . }}-headers-to-ignore-configmap' + key: request.headers + - name: TRACE_SAMPLING_ENABLED + value: "true" + - name: DB_NAME + value: {{ index .Values "apiclarity-postgresql" "auth" "database" }} + - name: DB_HOST + value: "{{ include "apiclarity.fullname" . 
}}-postgresql" + - name: DB_PORT_NUMBER + value: "5432" + - name: DB_USER + value: "postgres" + - name: DB_PASS + valueFrom: + secretKeyRef: + name: {{ index .Values "apiclarity-postgresql" "auth" "existingSecret" }} + key: postgres-password + - name: STATE_BACKUP_FILE_NAME + value: /apiclarity/state.gob + - name: ENABLE_TLS + value: "true" + - name: ROOT_CERT_FILE_PATH + value: /etc/root-ca/ca.crt + - name: TLS_SERVER_CERT_FILE_PATH + value: /etc/certs/server.crt + - name: TLS_SERVER_KEY_FILE_PATH + value: /etc/certs/server.key + - name: NOTIFICATION_BACKEND_PREFIX + value: {{ include "controller.fullname" . }}.{{ .Release.Namespace }}:8082 + - name: FUZZER_JOB_TEMPLATE_CONFIG_MAP_NAME + value: "{{ include "apiclarity.fullname" . }}-fuzzer-template" + - name: FUZZER_DEPLOYMENT_TYPE + value: "configmap" + - name: DIFFER_SEND_NOTIFICATION_INTERVAL_SEC + value: {{ .Values.global.sendTelemetriesIntervalSec | quote }} + readinessProbe: + httpGet: + path: /healthz/ready + port: 8081 + periodSeconds: 30 + failureThreshold: 5 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: /healthz/live + port: 8081 + initialDelaySeconds: 10 + periodSeconds: 30 + failureThreshold: 5 + timeoutSeconds: 10 + securityContext: + {{- toYaml .Values.apiclarity.securityContext | nindent 12 }} + resources: + {{- toYaml .Values.apiclarity.resources | nindent 12 }} + volumeMounts: + - name: apiclarity + mountPath: '/apiclarity' + - name: certs + mountPath: /etc/certs + readOnly: true + - name: root-ca + mountPath: /etc/root-ca + readOnly: true + volumes: + - name: apiclarity + persistentVolumeClaim: + claimName: {{ include "apiclarity.fullname" . }} + - name: certs + secret: + secretName: apiclarity-tls + - name: root-ca + configMap: + name: {{ include "product.name" . }}-root-ca.crt + {{- with .Values.apiclarity.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.apiclarity.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.apiclarity.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.apiclarity.podSecurityContext | nindent 8 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/fuzzer-template-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/fuzzer-template-configmap.yaml new file mode 100644 index 00000000..4e66dde8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/fuzzer-template-configmap.yaml @@ -0,0 +1,67 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "apiclarity.fullname" . }}-fuzzer-template + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +data: + config: |- + apiVersion: batch/v1 + kind: Job + metadata: + namespace: '{{ .Release.Namespace }}' + labels: +{{- toYaml .Values.apiclarity.fuzzer.labels | nindent 8 }} + spec: + backoffLimit: 0 + ttlSecondsAfterFinished: 300 + template: + metadata: + labels: + {{- toYaml .Values.apiclarity.fuzzer.labels | nindent 12 }} + spec: + restartPolicy: Never + volumes: + - name: tmp-volume + emptyDir: {} + - name: root-ca + configMap: + name: {{ include "product.name" . 
}}-root-ca.crt + containers: + - name: fuzzer + image: "{{ default .Values.global.registry .Values.apiclarity.fuzzer.image.registry }}/{{ .Values.apiclarity.fuzzer.image.repository }}:{{ .Values.apiclarity.fuzzer.image.tag }}" + imagePullPolicy: {{ .Values.apiclarity.fuzzer.image.pullPolicy }} + volumeMounts: + - mountPath: /tmp + name: tmp-volume + - name: root-ca + mountPath: /etc/root-ca + readOnly: true + env: + - name: PLATFORM_TYPE + value: "API_CLARITY" + - name: PLATFORM_HOST + value: https://{{ include "apiclarity.fullname" . }}.{{ .Release.Namespace }}:8443/api + - name: FUZZER + value: "scn-fuzzer,restler" + - name: RESTLER_ROOT_PATH + value: "/tmp" + - name: RESTLER_TOKEN_INJECTOR_PATH + value: "/app/" + - name: DEBUG + value: {{ .Values.apiclarity.fuzzer.debug | quote }} + securityContext: + {{- toYaml .Values.apiclarity.fuzzer.securityContext | nindent 14 }} + resources: + {{- toYaml .Values.apiclarity.fuzzer.resources | nindent 14 }} + {{- with .Values.apiclarity.fuzzer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.apiclarity.fuzzer.affinity }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/headers-to-ignore-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/headers-to-ignore-configmap.yaml new file mode 100644 index 00000000..8c2c59ad --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/headers-to-ignore-configmap.yaml @@ -0,0 +1,117 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "apiclarity.fullname" . 
}}-headers-to-ignore-configmap + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +data: + request.headers: |- + x-forwarded-proto + x-request-id + x-b3-traceid + x-b3-sampled + x-b3-spanid + x-span-name + x-b3-parentspanid + A-IM + Accept + Accept-Charset + Accept-Encoding + Accept-Language + Accept-Datetime + Access-Control-Request-Method + Access-Control-Request-Headers + Authorization + Cache-Control + Connection + Content-Length + Content-Type + Cookie + Date + Expect + Forwarded + From + Host + If-Match + If-Modified-Since + If-None-Match + If-Range + If-Unmodified-Since + Max-Forwards + Origin + Pragma + Proxy-Authorization + Range + Referer + TE + User-Agent + Upgrade + Via + Warning + response.headers: |- + x-application-context + Access-Control-Allow-Credentials + Access-Control-Allow-Headers + Access-Control-Allow-Methods + Access-Control-Allow-Origin + Access-Control-Expose-Headers + Access-Control-Max-Age + Accept-Ranges + Age + Allow + Alternate-Protocol + Cache-Control + Client-Date + Client-Peer + Client-Response-Num + Connection + Content-Disposition + Content-Encoding + Content-Language + Content-Length + Content-Location + Content-MD5 + Content-Range + Content-Security-Policy + X-Content-Security-Policy + X-WebKit-CSP + Content-Security-Policy-Report-Only + Content-Type + Date + ETag + Expires + HTTP + Keep-Alive + Last-Modified + Link + Location + P3P + Pragma + Proxy-Authenticate + Proxy-Connection + Refresh + Retry-After + Server + Set-Cookie + Status + Strict-Transport-Security + Timing-Allow-Origin + Trailer + Transfer-Encoding + Upgrade + Vary + Via + Warning + WWW-Authenticate + X-Aspnet-Version + X-Content-Type-Options + X-Frame-Options + X-Permitted-Cross-Domain-Policies + X-Pingback + X-Powered-By + X-Robots-Tag + X-UA-Compatible + X-XSS-Protection +{{- end }} diff --git 
a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/pvc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/pvc.yaml new file mode 100644 index 00000000..a45876d7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/pvc.yaml @@ -0,0 +1,22 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "apiclarity.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +spec: + accessModes: + - {{ .Values.apiclarity.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.apiclarity.persistence.size }} + {{- if .Values.apiclarity.persistence.storageClass }} + {{- if (eq "-" .Values.apiclarity.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.apiclarity.persistence.storageClass }} + {{- end }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/role.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/role.yaml new file mode 100644 index 00000000..7142a364 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/role.yaml @@ -0,0 +1,18 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "apiclarity.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +rules: + # needed for trace sampling manger and fuzzer + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "update", "delete"] + # needed for fuzzer + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/rolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/rolebinding.yaml new file mode 100644 index 00000000..73b05fc9 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "apiclarity.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "apiclarity.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "apiclarity.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/secret.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/secret.yaml new file mode 100644 index 00000000..613bafff --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/secret.yaml @@ -0,0 +1,11 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ index .Values "apiclarity-postgresql" "auth" "existingSecret" }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +data: + postgres-password: {{ .Values.controller.secret.sharedSecret | b64enc }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/service-lb.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/service-lb.yaml new file mode 100644 index 00000000..44c716dc --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/service-lb.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.global.isAPISecurityEnabled .Values.apiclarity.traceSource.external -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "apiclarity.fullname" . }}-external + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . 
| nindent 4 }} +spec: + type: LoadBalancer + ports: + - name: https-external + port: 10443 + protocol: TCP + targetPort: 10443 + selector: + {{- include "apiclarity.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/service.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/service.yaml new file mode 100644 index 00000000..d93d11b8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "apiclarity.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} +spec: + ports: + - name: http-trace-server + port: 9000 + protocol: TCP + targetPort: 9000 + - name: https-trace-server + port: 9443 + protocol: TCP + targetPort: 9443 + - name: http-backend + port: 8080 + protocol: TCP + targetPort: 8080 + - name: https-backend + port: 8443 + protocol: TCP + targetPort: 8443 + - name: http-trace-sampling-manager-server + port: 9990 + protocol: TCP + targetPort: 9990 + - name: grpc-trace-sampling-manager-server + port: 9991 + protocol: TCP + targetPort: 9991 + - name: healthz + port: 8081 + protocol: TCP + targetPort: 8081 + selector: + {{- include "apiclarity.selectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/serviceaccount.yaml new file mode 100644 index 00000000..5173cf4e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.global.isAPISecurityEnabled .Values.apiclarity.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "apiclarity.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "apiclarity.labels" . | nindent 4 }} + {{- with .Values.apiclarity.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/tls.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/tls.yaml new file mode 100644 index 00000000..23050eb0 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/tls.yaml @@ -0,0 +1,71 @@ +{{- if .Values.global.isAPISecurityEnabled -}} +{{- $caCert := "" }} +{{- $controllerServerCert := "" }} +{{- $controllerServerKey := "" }} +{{- $apiclarityServerCert := "" }} +{{- $apiclarityServerKey := "" }} +{{- $productName := include "product.name" . 
-}} +{{- $rootCAConfigName := print $productName "-root-ca.crt" -}} +{{- $rootCAConfig := (lookup "v1" "ConfigMap" .Release.Namespace $rootCAConfigName) }} +{{- $controllerName := include "controller.fullname" . -}} +{{- $controllerCertSecretName := print $controllerName "-tls" -}} +{{- $controllerCertSecret := (lookup "v1" "Secret" .Release.Namespace $controllerCertSecretName) }} +{{- $apiclarityCertSecret := (lookup "v1" "Secret" .Release.Namespace "apiclarity-tls") }} +{{- if and $rootCAConfig $controllerCertSecret $apiclarityCertSecret }} +{{- $caCert = get $rootCAConfig.data "ca.crt" -}} +{{- $controllerServerCert = get $controllerCertSecret.data "server.crt" -}} +{{- $controllerServerKey = get $controllerCertSecret.data "server.key" -}} +{{- $apiclarityServerCert = get $apiclarityCertSecret.data "server.crt" -}} +{{- $apiclarityServerKey = get $apiclarityCertSecret.data "server.key" -}} +{{- else }} +{{- $controllerCN := include "controller.fullname" . -}} +{{- $controllerExternalDNS := print $controllerCN "." .Release.Namespace -}} +{{- $controllerExternalDNSWithSVC := print $controllerExternalDNS ".svc" -}} +{{- $controllerExternalDNSFullFQDN := print $controllerExternalDNS ".svc.cluster.local" -}} +{{- $apiclarityCN := include "apiclarity.fullname" . -}} +{{- $apiclarityExternalDNS := print $apiclarityCN "." 
.Release.Namespace -}} +{{- $apiclarityExternalDNSWithSVC := print $apiclarityExternalDNS ".svc" -}} +{{- $apiclarityExternalDNSFullFQDN := print $apiclarityExternalDNS ".svc.cluster.local" -}} +{{- $ca := genCA "root-ca" 3650 -}} +{{- $controllerCert := genSignedCert $controllerCN (list "127.0.0.1") (list $controllerCN $controllerExternalDNS $controllerExternalDNSWithSVC $controllerExternalDNSFullFQDN ) 730 $ca -}} +{{- $apiclarityCert := genSignedCert $apiclarityCN (list "127.0.0.1") (list $apiclarityCN $apiclarityExternalDNS $apiclarityExternalDNSWithSVC $apiclarityExternalDNSFullFQDN ) 730 $ca -}} +{{- $caCert = $ca.Cert -}} +{{- $controllerServerCert = ($controllerCert.Cert | b64enc) -}} +{{- $controllerServerKey = ($controllerCert.Key | b64enc) -}} +{{- $apiclarityServerCert = ($apiclarityCert.Cert | b64enc) -}} +{{- $apiclarityServerKey = ($apiclarityCert.Key | b64enc) -}} +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "product.name" . }}-root-ca.crt + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + ca.crt: | + {{- $caCert | nindent 4 }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "controller.fullname" . }}-tls + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + server.crt: {{ $controllerServerCert }} + server.key: {{ $controllerServerKey }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: apiclarity-tls + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +data: + server.crt: {{ $apiclarityServerCert }} + server.key: {{ $apiclarityServerKey }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/wasm-envoy-filter.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/wasm-envoy-filter.yaml new file mode 100644 index 00000000..e2175c44 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/apiclarity/wasm-envoy-filter.yaml @@ -0,0 +1,150 @@ +{{- if and .Values.global.isAPISecurityEnabled .Values.apiclarity.traceSource.istio -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: {{ include "product.name" . }}-filter + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + component: trace-analyzer +spec: + {{- if .Values.global.isContainerSecurityEnabled }} + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + {{- end }} + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: envoy.filters.network.http_connection_manager + subFilter: + name: envoy.filters.http.router + patch: + operation: INSERT_BEFORE + value: + name: envoy.filters.http.wasm + typedConfig: + '@type': type.googleapis.com/udpa.type.v1.TypedStruct + typeUrl: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + name: {{ include "product.name" . }}-outbound-filter + rootId: {{ include "product.name" . 
}}-outbound-filter + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"trace_sampling_enabled": "true"} + vmConfig: + code: + remote: + http_uri: + cluster: wasm_file_server + timeout: 10s + uri: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:8081/http-trace-filter.wasm" + sha256: {{ .Values.apiclarity.traceWasmFilterSHA256 }} + runtime: envoy.wasm.runtime.v8 + vmId: {{ include "product.name" . }}-outbound-filter + - applyTo: CLUSTER + match: + context: SIDECAR_OUTBOUND + patch: + operation: ADD + value: + connect_timeout: 1s + dns_lookup_family: V4_ONLY + dns_refresh_rate: 5s + load_assignment: + cluster_name: wasm_file_server + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + port_value: 8081 + name: wasm_file_server + type: LOGICAL_DNS + - applyTo: CLUSTER + match: + context: SIDECAR_OUTBOUND + patch: + operation: ADD + value: # cluster specification + name: trace_analyzer + type: LOGICAL_DNS + connect_timeout: 0.5s + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: trace_analyzer + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + protocol: TCP + address: {{ include "apiclarity.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + port_value: 9000 +{{- if and .Values.global.isConnectionEnforcementEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.9000_._.{{ include "apiclarity.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "apiclarity.serviceAccountName" . }} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 +{{- end }} + - applyTo: CLUSTER + match: + context: SIDECAR_OUTBOUND + patch: + operation: ADD + value: # cluster specification + name: trace-sampling-manager + type: LOGICAL_DNS + connect_timeout: 0.5s + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: trace-sampling-manager + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + protocol: TCP + address: {{ include "apiclarity.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local + port_value: 9990 +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/_helpers.tpl new file mode 100644 index 00000000..e7faaeaf --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "controller.name" -}} +{{- default .Chart.Name .Values.controller.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "controller.fullname" -}} +{{- if .Values.controller.fullnameOverride -}} +{{- .Values.controller.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.controller.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "controller.labels" -}} +helm.sh/chart: {{ include "panoptica.chart" . }} +{{ include "controller.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.global.extraLabels }} +{{ toYaml $.Values.global.extraLabels }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "controller.selectorLabels" -}} +app: {{ include "controller.name" . }} +app.kubernetes.io/name: {{ include "controller.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "controller.serviceAccountName" -}} +{{- if .Values.controller.serviceAccount.create }} +{{- default (include "controller.fullname" .) .Values.controller.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.controller.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/cis-docker-benchmark-template-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/cis-docker-benchmark-template-configmap.yaml new file mode 100644 index 00000000..91990419 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/cis-docker-benchmark-template-configmap.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cis-docker-benchmark-template + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +data: + config: |- + apiVersion: batch/v1 + kind: Job + metadata: + namespace: {{ default .Release.Namespace .Values.imageAnalysis.jobDefaultNamespace }} + labels: + app: cis-docker-benchmark + sidecar.istio.io/inject: "false" + kuma.io/sidecar-injection: "disabled" + spec: + backoffLimit: 0 + activeDeadlineSeconds: 1200 + ttlSecondsAfterFinished: 30 + template: + metadata: + labels: + app: cis-docker-benchmark + sidecar.istio.io/inject: "false" + kuma.io/sidecar-injection: "disabled" + spec: + restartPolicy: Never + initContainers: + - name: ensure-forwarding-server + image: "{{ default .Values.global.registry .Values.busybox.image.registry }}/{{ .Values.busybox.image.repository }}:{{ .Values.busybox.image.tag }}" + imagePullPolicy: {{ .Values.busybox.image.pullPolicy }} + securityContext: + {{- toYaml .Values.busybox.securityContext | nindent 16 }} + args: + - /bin/sh + - -c + - > + set -x; + while [ 1 ]; do + nc {{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local 25235; + if [ $? -eq 0 ]; then + break; + fi + echo waiting for images analysis SBOM result forwarding server; sleep 5; + done; + containers: + - name: cis-docker-benchmark + image: {{ default .Values.global.registry .Values.imageAnalysis.cisDockerBenchmark.image.registry }}/{{ .Values.imageAnalysis.cisDockerBenchmark.image.repository }}:{{ .Values.imageAnalysis.cisDockerBenchmark.image.tag }} + env: + - name: RESULT_SERVICE_ADDR + value: {{ include "controller.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local:25235 + args: + - scan + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + {{- toYaml .Values.imageAnalysis.cisDockerBenchmark.securityContext | nindent 16 }} + resources: + {{- toYaml .Values.imageAnalysis.cisDockerBenchmark.resources | nindent 16 }} + volumes: + - name: tmp-volume + emptyDir: {} + securityContext: + {{- toYaml .Values.imageAnalysis.cisDockerBenchmark.podSecurityContext | nindent 12 }} + {{- with .Values.imageAnalysis.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageAnalysis.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/cluster-configuration-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/cluster-configuration-configmap.yaml new file mode 100644 index 00000000..f913e234 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/cluster-configuration-configmap.yaml @@ -0,0 +1,100 @@ +{{- if and .Values.global.isConnectionEnforcementEnabled (not .Values.global.isContainerSecurityEnabled) }} +{{- fail "Connection enforcement can only be enabled if K8s security is enabled!" }} +{{- end }} +{{- if and .Values.global.validateDeployerPolicy (not .Values.global.isContainerSecurityEnabled) }} +{{- fail "Deployer Policy enforcement can only be enabled if K8s security is enabled!" }} +{{- end }} +{{- if and .Values.global.k8sEventsEnabled (not .Values.global.isContainerSecurityEnabled) }} +{{- fail "K8s Events monitoring can only be enabled if K8s security is enabled!" 
}} +{{- end }} +{{- if and .Values.global.autoLabelEnabled (not .Values.global.isContainerSecurityEnabled) }} +{{- fail "Namespace auto-labeling can only be enabled if K8s security is enabled!" }} +{{- end }} +{{- if and .Values.global.isContainerSecurityEnabled .Values.global.ciImageSignatureValidation .Values.global.ciImageValidation }} +{{- fail "Cluster cannot have both CI image validation and CI image signature validation enabled!" }} +{{- end }} +{{- if .Values.global.tokenInjectionEnabled}} +{{- if not .Values.global.isAPISecurityEnabled }} +{{- fail "Token injection cannot be enabled when API security is disabled!" }} +{{- end }} +{{- if not .Values.global.isContainerSecurityEnabled }} +{{- fail "Token injection cannot be enabled when K8s security is disabled!" }} +{{- end }} +{{- if not .Values.global.skipCrossChartsVerification}} +{{- $statefulset := (lookup "apps/v1" "StatefulSet" "securecn-vault" "vault") }} +{{- if not $statefulset }} +{{- fail "Token injection cannot be enabled when Panoptica vault chart is not deployed (missing vault statefulset in securecn-vault namespace), please first deploy the missing chart." }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.global.isConnectionEnforcementEnabled }} +{{- if semverCompare ">1.19-0" .Values.istio.global.version }} +{{- .Values.istio.global.version | printf "Maximum supported istio version is 1.19.x while istio.global.version is set to %s" | fail }} +{{- end }} +{{- if not .Values.global.skipCrossChartsVerification}} +{{- if not .Values.istio.global.alreadyInstalled }} +{{- $istiodDeployment := (lookup "apps/v1" "Deployment" "istio-system" "istiod") }} +{{- if not $istiodDeployment }} +{{- fail "Connection enforcement cannot be enabled when Panoptica istio chart is not deployed (missing istiod deployment in istio-system namespace), please first deploy the missing chart." 
}} +{{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- if and .Values.istio.global.alreadyInstalled .Values.global.enableTlsInspection }} +{{- fail "TLS Inspection can't be enabled when using an upstream istio (istio.global.alreadyInstalled == true)" }} +{{- end }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-configuration + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + config: |- + kubernetesSecurityEnabled: {{ .Values.global.isContainerSecurityEnabled }} + {{- if .Values.global.isContainerSecurityEnabled }} + enableConnectionsControl: {{ .Values.global.isConnectionEnforcementEnabled }} + validateDeployerPolicy: {{ .Values.global.validateDeployerPolicy }} + k8sEventsEnabled: {{ .Values.global.k8sEventsEnabled }} + cdValidation: {{ .Values.global.cdValidation }} + ciImageSignatureValidation: {{ .Values.global.ciImageSignatureValidation }} + ciImageValidation: {{ .Values.global.ciImageValidation }} + restrictRegistries: {{ .Values.global.restrictRegistries }} + tokenInjectionEnabled: {{ .Values.global.tokenInjectionEnabled }} + connectionFailPolicyAllow: {{ .Values.global.connectionFailPolicyAllow }} + environmentFailurePolicyAllow: {{ .Values.global.environmentFailurePolicyAllow }} + sshMonitorEnabled: {{ .Values.global.isSSHMonitorEnabled }} + {{- else }} + {{- /* If container security is disabled defaults values should be true (meaning fail-close is false) */}} + connectionFailPolicyAllow: true + environmentFailurePolicyAllow: true + {{- end }} + apiSecurityEnabled: {{ .Values.global.isAPISecurityEnabled }} + {{- if .Values.global.isAPISecurityEnabled }} + enableFuzzTests: {{ .Values.apiclarity.fuzzer.enabled }} + installEnvoyTracingSupport: {{ .Values.apiclarity.traceSource.istio }} + supportExternalTraceSource: {{ .Values.apiclarity.traceSource.external }} + {{- end }} + autoLabelEnabled: {{ .Values.global.autoLabelEnabled }} + httpsProxy: "{{ .Values.global.httpsProxy }}" + 
internalRegistry: "{{ .Values.global.registry }}" + isPersistent: {{ .Values.controller.persistence.enabled }} + minimalNumberOfControllerReplicas: {{ .Values.controller.autoscaling.minReplicas }} + {{- if and .Values.global.isContainerSecurityEnabled .Values.global.isConnectionEnforcementEnabled }} + preserveOriginalSourceIp: {{ .Values.global.preserveOriginalSourceIp }} + tlsInspectionEnabled: {{ .Values.global.enableTlsInspection }} + istioInstallationParameters: + isIstioAlreadyInstalled: {{ .Values.istio.global.alreadyInstalled }} + istioVersion: {{ .Values.istio.global.version }} + serviceDiscoveryIsolationEnabled: {{ .Values.istio.global.serviceDiscoveryIsolationEnabled }} + {{- $configmap := (lookup "v1" "ConfigMap" "istio-system" "cluster-configuration") }} + {{- if $configmap }} + {{- $config := get $configmap.data "config" }} + {{- $config | nindent 4 }} + {{- end }} + {{- end }} + imageAnalysisConfiguration: + sbomEnabled: {{ .Values.imageAnalysis.sbom.enabled }} + cisDockerBenchmarkEnabled: {{ .Values.imageAnalysis.cisDockerBenchmark.enabled }} + parallelScannersNumber: {{ .Values.imageAnalysis.parallelScanners }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/clusterrole.yaml new file mode 100644 index 00000000..7efbe3ca --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/clusterrole.yaml @@ -0,0 +1,90 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "controller.fullname" . }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +rules: + - apiGroups: ["", "apps", "extensions", "batch", "rbac.authorization.k8s.io"] + resources: ["pods", "namespaces", "replicasets", "deployments", "jobs", "cronjobs", "daemonsets", "statefulsets", "nodes", "clusterroles", "clusterrolebindings", "roles", "rolebindings"] + verbs: ["get", "list", "watch"] +{{- if .Values.global.isContainerSecurityEnabled }} + # external service monitor + - apiGroups: ["", "networking.k8s.io", "networking.istio.io"] + resources: ["services", "endpoints", "ingresses", "virtualservices"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "update", "create"] + - apiGroups: [""] + resources: ["services"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/exec"] + verbs: ["create"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list", "create", "update", "delete"] + - apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create", "delete"] +{{- if .Values.global.isConnectionEnforcementEnabled }} + - apiGroups: ["networking.istio.io"] + resources: ["destinationrules", "serviceentries", "virtualservices", "envoyfilters"] + verbs: ["create", "patch", "get", "list", "delete"] + - apiGroups: ["security.istio.io"] + resources: ["peerauthentications"] + verbs: ["create", "patch", "list", "delete"] +{{- end }} + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get", "patch", "create", "delete", "list", "watch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations"] + verbs: ["get", "patch", "create", "delete", "list", "watch"] +{{- if .Values.global.isExternalCaEnabled }} + # CSR signer + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] 
+ verbs: ["update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + resourceNames: ["{{ include "product.name" . }}.io/*"] # all signers in the '{{ include "product.name" . }}.io' domain + verbs: ["sign"] + - apiGroups: ["cert-manager.io"] + resources: ["certificaterequests", "certificaterequests/status"] + verbs: ["get", "list", "watch", "create", "delete"] +{{- end }} +{{- if .Values.global.isOpenShift }} + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles", "clusterrolebindings"] + verbs: ["get", "create", "update", "delete"] + - apiGroups: ["k8s.cni.cncf.io"] + resources: ["network-attachment-definitions"] + verbs: ["get", "create", "update", "delete"] +{{- end }} + # Allow bank-vaults to extract command metadata from an image. + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list"] +{{- else if .Values.global.isAPISecurityEnabled }} + # Get kube-system namespace uid + # Watch namespaces for protected services discovery + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "watch"] + # Watch nodes for nodes info report + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + # Watch services for protected services discovery + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/clusterrolebinding.yaml new file mode 100644 index 00000000..95d46c1d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + name: {{ include "controller.fullname" . }} + labels: + {{- include "controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "controller.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/deployment.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/deployment.yaml new file mode 100644 index 00000000..bef34b4e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/deployment.yaml @@ -0,0 +1,265 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +spec: + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "controller.selectorLabels" . | nindent 6 }} + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 25% + template: + metadata: + annotations: + checksum/image-analysis-template-config: {{ include (print $.Template.BasePath "/controller/image-analysis-template-configmap.yaml") . | sha256sum }} + checksum/k8s-cis-benchmark-template-config: {{ include (print $.Template.BasePath "/controller/k8s-cis-benchmark-template-configmap.yaml") . 
| sha256sum }} + {{- if and .Values.global.isAPISecurityEnabled .Values.global.isConnectionEnforcementEnabled (not .Values.istio.global.alreadyInstalled) }} + traffic.sidecar.istio.io/includeOutboundIPRanges: "" + traffic.sidecar.istio.io/includeInboundPorts: "24229,24226" + proxy.istio.io/config: | + holdApplicationUntilProxyStarts: true + proxyMetadata: + ISTIO_META_DNS_CAPTURE: "false" + ISTIO_META_INSECURE_STACKDRIVER_ENDPOINT: "" + TLS_INTERCEPTION: "false" + {{- end }} + labels: + {{- include "controller.selectorLabels" . | nindent 8 }} + {{- if and .Values.global.isAPISecurityEnabled .Values.global.isConnectionEnforcementEnabled (not .Values.istio.global.alreadyInstalled) }} + sidecar.istio.io/inject: "true" + {{- end }} + spec: + {{- with .Values.controller.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "controller.serviceAccountName" . }} + initContainers: + {{- if .Values.global.isAPISecurityEnabled }} + - name: init-apiclarity + image: "{{ default .Values.global.registry .Values.busybox.image.registry }}/{{ .Values.busybox.image.repository }}:{{ .Values.busybox.image.tag }}" + imagePullPolicy: {{ .Values.busybox.image.pullPolicy }} + securityContext: + {{- toYaml .Values.busybox.securityContext | nindent 12 }} + args: + - /bin/sh + - -c + - > + set -x; + while [ $(curl -sw '%{http_code}' "http://{{ include "apiclarity.fullname" . }}.{{ .Release.Namespace }}:8081/healthz/ready" -o /dev/null) -ne 200 ]; do + echo waiting for apiclarity; sleep 2; + done; + {{- end }} + containers: + - name: {{ include "controller.fullname" . 
}} + image: "{{ default .Values.global.registry .Values.controller.image.registry }}/{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}" + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + securityContext: + {{- toYaml .Values.controller.securityContext | nindent 12 }} + args: + - run + - --log-level + - {{ .Values.controller.logLevel }} + env: + - name: AGENT_ID + value: {{ required "Agent ID is required!" .Values.controller.agentID | quote }} + - name: MANAGEMENT_HOST + value: {{ .Values.global.mgmtHostname | quote }} + - name: SEND_TELEMETRIES_INTERVAL_SEC + value: {{ .Values.global.sendTelemetriesIntervalSec | quote }} + {{- if .Values.global.validateDeployerPolicy }} + - name: ENABLE_DEPLOYER_POLICY + value: "true" + {{- end }} + {{- if .Values.global.k8sEventsEnabled }} + - name: ENABLE_K8S_EVENTS + value: "true" + {{- end }} + {{- if .Values.controller.persistence.enabled }} + - name: AGENT_PERSISTENT_PATH + value: "/{{ include "controller.fullname" . }}" + - name: IS_PERSISTENT + value: "true" + {{- end }} + {{- if .Values.global.autoLabelEnabled }} + - name: ENABLE_NAMESPACE_AUTO_LABEL + value: "true" + {{- end }} + {{- if and .Values.global.tokenInjectionEnabled .Values.global.isContainerSecurityEnabled }} + - name: API_TOKEN_INJECTION_ENABLED + value: "true" + - name: VAULT_ENV_IMAGE + value: {{ default .Values.global.registry .Values.vaultEnv.image.registry }}/{{ .Values.vaultEnv.image.repository }}:{{ .Values.vaultEnv.image.tag }} + - name: VAULT_ENV_LOG_SERVER + value: {{ include "controller.fullname" . }}.{{ .Release.Namespace }}:9514 + {{- end }} + {{- if .Values.global.isOpenShift }} + - name: IS_OPENSHIFT + value: "true" + {{- end }} + - name: CONNECTION_EVENT_FORWARDING_SERVICE_ADDRESS + value: {{ include "controller.fullname" . 
}}.{{ .Release.Namespace }}:24234 + - name: CIS_BENCHMARK_CONFIG_MAP_NAMESPACE + value: {{ .Release.Namespace }} + - name: POD_NAMESPACE + value: {{ .Release.Namespace }} + - name: DEPLOYMENT_NAME + value: {{ include "controller.fullname" . }} + - name: SERVICE_NAME + value: {{ include "controller.fullname" . }} + - name: SA_NAME + value: {{ include "controller.serviceAccountName" . }} + - name: PRODUCT_NAME + value: {{ include "product.name" . }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_TEMPLATE_HASH + valueFrom: + fieldRef: + fieldPath: metadata.labels['pod-template-hash'] + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SHARED_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.controller.secret.existingSecret | default (include "controller.fullname" .) }} + key: {{ .Values.controller.secret.sharedSecretKey | default "SHARED_SECRET" }} + - name: CONNECTION_FAILURE_POLICY_ALLOW + value: {{ .Values.global.connectionFailPolicyAllow | quote }} + - name: VALIDATING_FAILURE_POLICY_ALLOW + value: {{ .Values.global.environmentFailurePolicyAllow | quote }} + {{- if not .Values.global.enableTlsInspection }} + - name: DISABLE_TLS_INTERCEPTION + value: "true" + {{- end }} + {{- if .Values.global.isAPISecurityEnabled }} + - name: TRACE_SUPPORT_ENABLED + value: "true" + - name: TRACE_WASM_FILTER_SHA256 + value: {{ .Values.apiclarity.traceWasmFilterSHA256 }} + - name: APICLARITY_BACKEND_ADDRESS + value: {{ include "apiclarity.fullname" . }}.{{ .Release.Namespace }}:8443 + - name: TRACE_SAMPLING_MANAGER_GRPC_ADDRESS + value: {{ include "apiclarity.fullname" . }}.{{ .Release.Namespace }}:9991 + {{- if .Values.apiclarity.traceSource.external }} + - name: SUPPORT_EXTERNAL_TRACE_SOURCE + value: "true" + - name: APICLARITY_EXPOSED_SERVICE_NAME + value: "{{ include "apiclarity.fullname" . 
}}-external" + {{- end }} + {{- end }} + - name: K8S_CIS_BENCHMARK_ENABLED + value: {{ .Values.global.k8sCisBenchmarkEnabled | quote }} + - name: CONTAINER_SECURITY_ENABLED + value: {{ .Values.global.isContainerSecurityEnabled | quote }} + - name: SSH_MONITOR_ENABLED + value: {{ .Values.global.isSSHMonitorEnabled | quote }} + {{- if not .Values.global.isConnectionEnforcementEnabled }} + - name: CONNECTION_ENFORCEMENT + value: "Disabled" + {{- end }} + {{- if .Values.global.dummyPlaceHolderForTest }} + - name: AGENT_VERSION_FOR_TEST + value: 1.1.1 + {{- end }} + {{- if .Values.global.httpsProxy }} + - name: https_proxy + value: "{{ .Values.global.httpsProxy }}" + {{- end }} + {{- if .Values.global.httpProxy }} + - name: http_proxy + value: "{{ .Values.global.httpProxy }}" + {{- end }} + - name: IS_ISTIO_ALREADY_INSTALLED + value: {{ .Values.istio.global.alreadyInstalled | quote }} + {{- if .Values.global.isExternalCaEnabled }} + - name: EXTERNAL_CA_SIGNER_NAME + value: "{{ include "product.name" . 
}}.io/signer" + {{- end }} + - name: ETI_GCR_REPO + value: {{ .Values.global.registry | quote }} + - name: FUZZER_IMAGE_REPO + value: "{{ default .Values.global.registry .Values.apiclarity.fuzzer.image.registry }}/{{ .Values.apiclarity.fuzzer.image.repository }}" + - name: IS_MANAGED_BY_HELM + value: {{ .Values.global.isManagedByHelm | quote }} + resources: + {{- toYaml .Values.controller.resources | nindent 12 }} + readinessProbe: + httpGet: + path: /healthz/ready + port: 8080 + periodSeconds: 30 + failureThreshold: 5 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: /healthz/live + port: 8080 + initialDelaySeconds: 120 + periodSeconds: 30 + failureThreshold: 5 + timeoutSeconds: 10 + volumeMounts: + {{- if .Values.global.isAPISecurityEnabled }} + - mountPath: /etc/certs + name: certs + readOnly: true + - mountPath: /etc/root-ca + name: root-ca + readOnly: true + {{- end }} + {{- if not .Values.controller.persistence.enabled }} + - mountPath: /tmp + name: tmp-volume + {{- else }} + - name: {{ include "controller.fullname" . }} + mountPath: /{{ include "controller.fullname" . }} + {{- end }} + volumes: + {{- if .Values.global.isAPISecurityEnabled }} + - name: certs + secret: + secretName: {{ include "controller.fullname" . }}-tls + - name: root-ca + configMap: + name: {{ include "product.name" . }}-root-ca.crt + {{- end }} + {{- if not .Values.controller.persistence.enabled }} + - name: tmp-volume + emptyDir: {} + {{- else }} + - name: {{ include "controller.fullname" . }} + persistentVolumeClaim: + {{- if .Values.global.dummyPlaceHolderForTest }} + claimName: {{ include "product.name" . }}-pvc-for-test + {{- else }} + claimName: {{ include "product.name" . }}-{{ .Values.controller.persistence.pvcSuffix }} + {{- end }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/hpa.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/hpa.yaml new file mode 100644 index 00000000..85dfad81 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.controller.autoscaling.enabled (not .Values.controller.persistence.enabled) -}} +apiVersion: {{ include "panoptica.capabilities.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "controller.fullname" . 
}} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- if .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-clusterrole.yaml new file mode 100644 index 00000000..ea32b78b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-clusterrole.yaml @@ -0,0 +1,18 @@ +# ClusterRole to allow the Panoptica controller to perform image analysis. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "controller.fullname" . }}-image-analysis + labels: + {{- include "controller.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: + - "pods" + - "configmaps" + verbs: ["get", "list"] + + - apiGroups: ["batch"] + resources: + - "jobs" + verbs: ["create"] diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-clusterrolebinding.yaml new file mode 100644 index 00000000..566a6a54 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "controller.fullname" . }}-image-analysis + labels: + {{- include "controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "controller.fullname" . }}-image-analysis +subjects: + - kind: ServiceAccount + name: {{ include "controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-template-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-template-configmap.yaml new file mode 100644 index 00000000..19af8c07 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/image-analysis-template-configmap.yaml @@ -0,0 +1,93 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: image-analysis-template + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + config: |- + apiVersion: batch/v1 + kind: Job + metadata: + namespace: {{ default .Release.Namespace .Values.imageAnalysis.jobDefaultNamespace }} + labels: + app: image-analysis + sidecar.istio.io/inject: "false" + kuma.io/sidecar-injection: "disabled" + spec: + backoffLimit: 0 + activeDeadlineSeconds: 1200 + ttlSecondsAfterFinished: 30 + template: + metadata: + labels: + app: image-analysis + sidecar.istio.io/inject: "false" + kuma.io/sidecar-injection: "disabled" + spec: + restartPolicy: Never + initContainers: + - name: ensure-forwarding-server + image: "{{ default .Values.global.registry .Values.busybox.image.registry }}/{{ .Values.busybox.image.repository }}:{{ .Values.busybox.image.tag }}" + imagePullPolicy: {{ .Values.busybox.image.pullPolicy }} + securityContext: + {{- toYaml .Values.busybox.securityContext | nindent 16 }} + args: + - /bin/sh + - -c + - > + set -x; + while [ 1 ]; do + nc {{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local 25235; + if [ $? 
-eq 0 ]; then + break; + fi + echo waiting for images analysis SBOM result forwarding server; sleep 5; + done; + containers: + - name: image-analysis + image: "{{ default .Values.global.registry .Values.imageAnalysis.sbom.image.registry }}/{{ .Values.imageAnalysis.sbom.image.repository }}:{{ .Values.imageAnalysis.sbom.image.tag }}" + env: + - name: ANALYZER_LIST + value: "syft" + - name: RESULT_SERVICE_ADDR + value: {{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:25235 + - name: REGISTRY_SKIP_VERIFY_TLS + value: {{ .Values.imageAnalysis.registry.skipVerifyTlS | quote }} + - name: REGISTRY_USE_HTTP + value: {{ .Values.imageAnalysis.registry.useHTTP | quote }} + {{- if .Values.global.httpsProxy }} + - name: HTTPS_PROXY + value: "{{ .Values.global.httpsProxy }}" + {{- end }} + {{- if .Values.global.httpProxy }} + - name: HTTP_PROXY + value: "{{ .Values.global.httpProxy }}" + {{- end }} + {{- if or .Values.global.httpProxy .Values.global.httpsProxy }} + - name: NO_PROXY + value: {{ include "controller.fullname" . }}.{{ .Release.Namespace }}:25235 + {{- end }} + args: + - analyze + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + {{- toYaml .Values.imageAnalysis.sbom.securityContext | nindent 16 }} + resources: + {{- toYaml .Values.imageAnalysis.sbom.resources | nindent 16 }} + volumes: + - name: tmp-volume + emptyDir: {} + securityContext: + {{- toYaml .Values.imageAnalysis.sbom.podSecurityContext | nindent 12 }} + {{- with .Values.imageAnalysis.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageAnalysis.tolerations }} + tolerations: + {{- toYaml . 
| nindent 12 }} + {{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-clusterrole.yaml new file mode 100644 index 00000000..33e8945c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-clusterrole.yaml @@ -0,0 +1,13 @@ +{{- if .Values.global.k8sCisBenchmarkEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "controller.fullname" . }}-k8s-cis-benchmark + labels: + {{- include "controller.labels" . | nindent 4 }} +rules: + - apiGroups: [""] + resources: + - "nodes" + verbs: ["get", "list"] +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-clusterrolebinding.yaml new file mode 100644 index 00000000..a09520f2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.global.k8sCisBenchmarkEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "controller.fullname" . }}-k8s-cis-benchmark + labels: + {{- include "controller.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "controller.fullname" . }}-k8s-cis-benchmark +subjects: + - kind: ServiceAccount + name: {{ include "controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-role.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-role.yaml new file mode 100644 index 00000000..b95d201b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.global.k8sCisBenchmarkEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "controller.fullname" . }}-k8s-cis-benchmark + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: + - "configmaps" + verbs: ["get", "list"] + + - apiGroups: ["batch"] + resources: + - "jobs" + verbs: ["create", "get", "list"] +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-rolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-rolebinding.yaml new file mode 100644 index 00000000..2f92fbb1 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.global.k8sCisBenchmarkEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "controller.fullname" . }}-k8s-cis-benchmark + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "controller.fullname" . }}-k8s-cis-benchmark +subjects: + - kind: ServiceAccount + name: {{ include "controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-template-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-template-configmap.yaml new file mode 100644 index 00000000..401eb684 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-cis-benchmark-template-configmap.yaml @@ -0,0 +1,80 @@ +{{- if .Values.global.k8sCisBenchmarkEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-cis-benchmark-template + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + config: |- + apiVersion: batch/v1 + kind: Job + metadata: + name: k8s-cis-benchmark + labels: + k8s-cis-benchmark: k8s-cis-benchmark + spec: + activeDeadlineSeconds: 1200 + ttlSecondsAfterFinished: 30 + template: + spec: + hostPID: true + restartPolicy: Never + tolerations: + - operator: Exists + initContainers: + - name: init-k8s-cis-benchmark + image: "{{ default .Values.global.registry .Values.busybox.image.registry }}/{{ .Values.busybox.image.repository }}:{{ .Values.busybox.image.tag }}" + imagePullPolicy: {{ .Values.busybox.image.pullPolicy }} + securityContext: + {{- toYaml .Values.busybox.securityContext | nindent 16 }} + args: + - /bin/sh + - -c + - > + set -x; + while [ 1 ]; do + nc {{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local 24235; + if [ $? 
-eq 0 ]; then + break; + fi + echo waiting for k8s CIS benchmark result forwarding server; sleep 5; + done; + containers: + - name: k8s-cis-benchmark + image: {{ default .Values.global.registry .Values.k8sCISBenchmark.image.registry }}/{{ .Values.k8sCISBenchmark.image.repository }}:{{ .Values.k8sCISBenchmark.image.tag }} + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: RPC_ADDRESS + value: {{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:24235 + command: + - k8s-cis-benchmark + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-discovery-clusterrole.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-discovery-clusterrole.yaml new file mode 100644 index 00000000..3ef8e2ec --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-discovery-clusterrole.yaml @@ -0,0 +1,73 @@ +# ClusterRole to allow the Panoptica agent to discover all the required +# resources on the kubernetes cluster. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "controller.fullname" . }}-k8s-discovery + labels: + {{- include "controller.labels" . 
| nindent 4 }} +rules: + - apiGroups: [""] + resources: + - "namespaces" + - "nodes" + - "pods" + - "podtemplates" + - "configmaps" + - "services" + - "serviceaccounts" + - "endpoints" + - "persistentvolumeclaims" + - "persistentvolumes" + - "replicationcontrollers" + verbs: ["get", "list"] + + - apiGroups: ["rbac.authorization.k8s.io"] + resources: + - "clusterroles" + - "clusterrolebindings" + - "roles" + - "rolebindings" + verbs: ["get", "list"] + + - apiGroups: ["admissionregistration.k8s.io"] + resources: + - "mutatingwebhookconfigurations" + - "validatingwebhookconfigurations" + verbs: ["get", "list"] + + - apiGroups: ["apiextensions.k8s.io"] + resources: + - "customresourcedefinitions" + verbs: ["get", "list"] + + - apiGroups: ["storage.k8s.io"] + resources: + - "storageclasses" + verbs: ["get", "list"] + + - apiGroups: ["apps"] + resources: + - "deployments" + - "statefulsets" + - "replicasets" + - "daemonsets" + verbs: ["get", "list"] + + - apiGroups: ["networking.k8s.io"] + resources: + - "networkpolicies" + - "ingresses" + - "ingressclasses" + verbs: ["get", "list"] + + - apiGroups: ["batch"] + resources: + - "jobs" + - "cronjobs" + verbs: ["get", "list"] + + - apiGroups: ["policy"] + resources: + - "podsecuritypolicies" + verbs: ["get", "list"] diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-discovery-clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-discovery-clusterrolebinding.yaml new file mode 100644 index 00000000..7173b513 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/k8s-discovery-clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding 
+metadata: + name: {{ include "controller.fullname" . }}-k8s-discovery + labels: + {{- include "controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "controller.fullname" . }}-k8s-discovery +subjects: + - kind: ServiceAccount + name: {{ include "controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/kafka-authz-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/kafka-authz-configmap.yaml new file mode 100644 index 00000000..eeaef914 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/kafka-authz-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "product.name" . }}-kafka-authz + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +data: + broker.port: |- + 9092 +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/kafka-injector-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/kafka-injector-configmap.yaml new file mode 100644 index 00000000..095fa50e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/kafka-injector-configmap.yaml @@ -0,0 +1,25 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "product.name" . }}-kafka-injector + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + config: |- + name: {{ include "product.name" . }}-kafka-authz-injector + image: "{{ default .Values.global.registry .Values.kafkaAuthzInjector.image.registry }}/{{ .Values.kafkaAuthzInjector.image.repository }}:{{ .Values.kafkaAuthzInjector.image.tag }}" + imagePullPolicy: {{ .Values.kafkaAuthzInjector.image.pullPolicy }} + securityContext: + {{- toYaml .Values.kafkaAuthzInjector.securityContext | nindent 6 }} + volumeMounts: + - name: {{ include "product.name" . 
}} + mountPath: /jar + supported.broker.names: |- + {"broker":true,"cp-kafka-broker":true,"kafka-broker":true,"kafka":true} + supported.configmap.field.names: |- + {"server.properties":true,"server.config":true} + supported.start.commands: |- + {"kafka-server-start.sh":"KAFKA_NATIVE_DISTRIBUTION","docker/run":"KAFKA_DOCKER_DISTRIBUTION","kafka_run.sh":"KAFKA_NATIVE_DISTRIBUTION"} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/mutatingwebhookconfiguration.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/mutatingwebhookconfiguration.yaml new file mode 100644 index 00000000..8f79fd55 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/mutatingwebhookconfiguration.yaml @@ -0,0 +1,104 @@ +{{- if .Values.global.isContainerSecurityEnabled -}} +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ include "product.name" . }}-mutating-webhook + labels: + owner: {{ include "product.name" . }} + {{- include "controller.labels" . | nindent 4 }} +webhooks: +{{- if .Values.global.isConnectionEnforcementEnabled }} + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + path: /webhooks/mutating/connections/pods + port: 443 + failurePolicy: Ignore + matchPolicy: Equivalent + name: secureapplication-connections-protected.pod.validator.{{ include "product.name" . 
}}-webhook + namespaceSelector: + matchExpressions: + - key: SecureApplication-protected + operator: In + values: + - full + - connections-only + reinvocationPolicy: Never + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + resources: + - pods + scope: '*' + sideEffects: None + timeoutSeconds: 10 +{{- end }} + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + path: /webhooks/mutating/deployment/pods + port: 443 + failurePolicy: Ignore + matchPolicy: Equivalent + name: secureapplication-deployment-protected.pod.validator.{{ include "product.name" . }}-webhook + namespaceSelector: + matchExpressions: + - key: SecureApplication-protected + operator: In + values: + - full + - deployment-only + reinvocationPolicy: Never + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + resources: + - pods + scope: '*' + sideEffects: None + timeoutSeconds: 10 + {{- if .Values.global.autoLabelEnabled }} + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + path: /webhooks/mutating/namespaces + port: 443 + failurePolicy: Ignore + name: namespace.mutating.{{ include "product.name" . 
}}-webhook + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + resources: + - namespaces + scope: '*' + sideEffects: None + timeoutSeconds: 10 + {{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/pdb.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/pdb.yaml new file mode 100644 index 00000000..82a1de88 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/pdb.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.controller.pdb.create (not .Values.controller.persistence.enabled) -}} +apiVersion: {{ include "panoptica.capabilities.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +spec: + {{- if .Values.controller.pdb.minAvailable }} + minAvailable: {{ .Values.controller.pdb.minAvailable }} + {{- end }} + {{- if .Values.controller.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.controller.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "controller.labels" . 
| nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/post-delete-job.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/post-delete-job.yaml new file mode 100644 index 00000000..590ff73b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/post-delete-job.yaml @@ -0,0 +1,104 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "controller.fullname" . }}-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +data: + uninstall.sh: |- + #!/bin/sh -x + + kubectl -n {{ .Release.Namespace }} delete secrets --field-selector type={{ include "product.name" . }}.io/ca-root --ignore-not-found +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "controller.fullname" . }}-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["list", "delete"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "controller.fullname" . }}-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "controller.fullname" . }}-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "controller.fullname" . }}-post-delete +subjects: + - kind: ServiceAccount + name: {{ include "controller.fullname" . }}-post-delete + namespace: {{ .Release.Namespace }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "controller.fullname" . }}-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +spec: + backoffLimit: 0 + template: + metadata: + name: {{ include "controller.fullname" . }}-post-delete + spec: + restartPolicy: Never + serviceAccountName: {{ include "controller.fullname" . }}-post-delete + securityContext: + runAsNonRoot: true + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: kubectl + image: "{{ default .Values.global.registry .Values.kubectl.image.registry }}/{{ .Values.kubectl.image.repository }}:{{ .Values.kubectl.image.tag }}" + imagePullPolicy: {{ .Values.kubectl.image.pullPolicy }} + command: [ "/bin/sh", "-c", "/opt/uninstall.sh" ] + volumeMounts: + - name: uninstall-script + mountPath: /opt/uninstall.sh + subPath: uninstall.sh + volumes: + - name: uninstall-script + configMap: + name: {{ include "controller.fullname" . }}-post-delete + items: + - key: uninstall.sh + path: uninstall.sh + mode: 0777 + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/pvc.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/pvc.yaml new file mode 100644 index 00000000..13a8bcd3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/pvc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.controller.persistence.enabled -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + {{- if .Values.global.dummyPlaceHolderForTest }} + name: {{ include "product.name" . }}-pvc-for-test + {{- else }} + name: {{ include "product.name" . }}-{{ .Values.controller.persistence.pvcSuffix }} + {{- end }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +spec: + accessModes: + - {{ .Values.controller.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.controller.persistence.size }} + {{- if .Values.controller.persistence.storageClass }} + {{- if (eq "-" .Values.controller.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.controller.persistence.storageClass }} + {{- end }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/role.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/role.yaml new file mode 100644 index 00000000..89b322fa --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/role.yaml @@ -0,0 +1,16 @@ 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +rules: + # Leases and configmaps are required for leader election, legacy leader + # election uses configmaps, new deploys will use leases. + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "update", "create", "delete", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "update", "create", "delete", "list", "watch"] \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/rolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/rolebinding.yaml new file mode 100644 index 00000000..84b49f07 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/rolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "controller.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/secret.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/secret.yaml new file mode 100644 index 00000000..e214a66c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/secret.yaml @@ -0,0 +1,12 @@ +{{- if not .Values.controller.secret.existingSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +type: Opaque +data: + {{ .Values.controller.secret.sharedSecretKey | default "SHARED_SECRET" }}: {{ required "Shared secret is required!" .Values.controller.secret.sharedSecret | b64enc }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/service.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/service.yaml new file mode 100644 index 00000000..4c610614 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/service.yaml @@ -0,0 +1,82 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.controller.service.type }} + ports: + - name: grpc-image-analysis-sbom-result-forwarding-server + port: 25235 + protocol: TCP + targetPort: 25235 + {{- if .Values.global.k8sCisBenchmarkEnabled }} + - name: grpc-k8s-cis-benchmark-result-forwarding-server + port: 24235 + protocol: TCP + targetPort: 24235 + {{- end }} + {{- if .Values.global.isContainerSecurityEnabled }} + {{- if .Values.global.isConnectionEnforcementEnabled }} + - name: {{ include "product.name" . }}-agent-sidecars + port: 24225 + protocol: TCP + targetPort: 24225 + - name: grpc-{{ include "product.name" . }}-agent-inbound-ext-auth-server + port: 24229 + protocol: TCP + targetPort: 24229 + - name: grpc-{{ include "product.name" . }}-agent-outbound-ext-auth-server + port: 24226 + protocol: TCP + targetPort: 24226 + - name: grpc-istio-adapter-server + port: 24227 + protocol: TCP + targetPort: 24227 + - name: grpc-connection-event-forwarding-server + port: 24234 + protocol: TCP + targetPort: 24234 + - name: grpc-{{ include "product.name" . }}-agent-kafka-authz-server + port: 24231 + protocol: TCP + targetPort: 24231 + - name: http-agent-kafka-opa-server + port: 8181 + protocol: TCP + targetPort: 8181 + {{- end }} + - name: {{ include "product.name" . }}-agent-webhook-server + port: 443 + protocol: TCP + targetPort: 8443 + - name: grpc-{{ include "product.name" . }}-agent-cd-server + port: 24230 + protocol: TCP + targetPort: 24230 + - name: grpc-{{ include "product.name" . 
}}-agent-dns-exporter-server + port: 24232 + protocol: TCP + targetPort: 24232 + {{- if .Values.global.tokenInjectionEnabled }} + - name: vault-env-log-server + port: 9514 + protocol: UDP + targetPort: 9514 + {{- end }} + {{- end }} + {{- if .Values.global.isAPISecurityEnabled }} + - name: http-wasm-filter-file-server + port: 8081 + protocol: TCP + targetPort: 8081 + - name: http-apiclarity-notification-server + port: 8082 + protocol: TCP + targetPort: 8082 + {{- end }} + selector: + {{- include "controller.selectorLabels" . | nindent 4 }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/serviceaccount.yaml new file mode 100644 index 00000000..2d0f1a4e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.controller.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + {{- with .Values.controller.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/sidecar-injector-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/sidecar-injector-configmap.yaml new file mode 100644 index 00000000..9a5b8bcc --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/sidecar-injector-configmap.yaml @@ -0,0 +1,58 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "product.name" . }}-sidecar-injector + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + {{- if .Values.global.dummyPlaceHolderForTest }} + role: test + {{- end }} +data: + config: |- + name: dns-detector + image: "{{ default .Values.global.registry .Values.dnsDetector.image.registry }}/{{ .Values.dnsDetector.image.repository }}:{{ .Values.dnsDetector.image.tag }}" + imagePullPolicy: {{ .Values.dnsDetector.image.pullPolicy }} + args: + - -dev + - eth0 + - -pod_status_grpc_address + - {{ include "controller.fullname" . }}.{{ .Release.Namespace }}:24225 + - -capture_export_addr + - {{ include "controller.fullname" . 
}}.{{ .Release.Namespace }}:24232 + {{- if .Values.istio.global.alreadyInstalled }} + - -passive_capture + {{- end }} + - -assembly_debug_log + - -debug + - warning + - -quiet + securityContext: + {{- if .Values.istio.global.alreadyInstalled }} + runAsUser: 0 + {{- else }} + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsUser: 1337 + runAsGroup: 1337 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + {{- end }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + resources: + {{- toYaml .Values.dnsDetector.resources | nindent 6 }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/validatingwebhookconfiguration.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/validatingwebhookconfiguration.yaml new file mode 100644 index 00000000..f294d463 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/controller/validatingwebhookconfiguration.yaml @@ -0,0 +1,156 @@ +{{- if .Values.global.isContainerSecurityEnabled -}} +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ include "product.name" . }}-validating-webhook + labels: + owner: {{ include "product.name" . }} + {{- include "controller.labels" . | nindent 4 }} +webhooks: + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} + path: /webhooks/validating/pods + port: 443 + failurePolicy: Ignore + matchPolicy: Equivalent + name: secureapplication-deployment-protected.pod.validator.{{ include "product.name" . }}-webhook + namespaceSelector: + matchExpressions: + - key: SecureApplication-protected + operator: In + values: + - full + - deployment-only + objectSelector: {} + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + resources: + - pods + scope: '*' + sideEffects: None + timeoutSeconds: 10 + {{- if .Values.global.k8sEventsEnabled }} + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + path: /webhooks/audit/all + port: 443 + failurePolicy: Ignore + matchPolicy: Equivalent + name: all.auditor.{{ include "product.name" . }}-webhook + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - '*' + resources: + - '*/*' + scope: '*' + sideEffects: None + timeoutSeconds: 10 + {{- end }} + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . }} + namespace: {{ .Release.Namespace }} + path: /webhooks/jobs-owner + port: 443 + failurePolicy: Ignore + matchPolicy: Equivalent + name: jobs.owner.{{ include "product.name" . }}-webhook + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - batch + apiVersions: + - v1 + operations: + - CREATE + - DELETE + resources: + - jobs + scope: '*' + sideEffects: None + timeoutSeconds: 10 + {{- if .Values.global.validateDeployerPolicy }} + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + caBundle: "" + service: + name: {{ include "controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} + path: /webhooks/deployer/policy + port: 443 + failurePolicy: Ignore + matchPolicy: Equivalent + name: deployer.policy.{{ include "product.name" . }}-webhook + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - apps + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: ["deployments", "statefulsets", "daemonsets"] + scope: '*' + - apiGroups: + - batch + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: ["jobs", "cronjobs"] + scope: '*' + - apiGroups: + - "" + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: ["pods", "configmaps"] + scope: '*' + - apiGroups: + - rbac.authorization.k8s.io + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: ["roles", "clusterroles"] + scope: '*' + sideEffects: None + timeoutSeconds: 10 + {{- end }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/destinationrule.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/destinationrule.yaml new file mode 100644 index 00000000..c22516dd --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/destinationrule.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.global.isConnectionEnforcementEnabled .Values.istio.global.alreadyInstalled -}} +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + name: disable-mtls-to-{{ include "product.name" . }}-agent + namespace: {{ .Release.Namespace }} + labels: + owner: {{ include "product.name" . }} +spec: + host: "{{ include "controller.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local" + exportTo: + - "*" + trafficPolicy: + tls: + mode: DISABLE +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.10.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.10.yaml new file mode 100644 index 00000000..f8202007 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.10.yaml @@ -0,0 +1,385 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.10 - 1.12" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.10 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.10.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . }} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.10.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . }} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: 
true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.10 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.10.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.10.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.10.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.11.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.11.yaml new file mode 100644 index 00000000..f59c8227 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.11.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.11 - 1.11" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.11 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.11.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.11.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.11 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.11.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.11.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.11.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.12.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.12.yaml new file mode 100644 index 00000000..f661381d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.12.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.12 - 1.14" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.12 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.12.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.12.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.12 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.12.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.12.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.12.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.13.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.13.yaml new file mode 100644 index 00000000..057c85bd --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.13.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.13 - 1.16" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.13 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.13.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.13.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.13 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.13.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.13.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.13.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.14.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.14.yaml new file mode 100644 index 00000000..9e96015b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.14.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.14 - 1.16" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.14 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.14.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.14.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.14 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.14.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.14.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.14.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.15.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.15.yaml new file mode 100644 index 00000000..147cd7b7 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.15.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.15 - 1.17" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.15 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.15.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.15.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.15 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.15.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.15.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.15.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.16.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.16.yaml new file mode 100644 index 00000000..e6ff5f8c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.16.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.16 - 1.18" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.16 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.16.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.16.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.16 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.16.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.16.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.16.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.17.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.17.yaml new file mode 100644 index 00000000..f8951b8b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.17.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.17 - 1.19" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.17 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.17.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.17.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.17 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.17.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.17.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.17.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.18.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.18.yaml new file mode 100644 index 00000000..2c0495d6 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.18.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.18 - 1.20" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.18 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.18.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.18.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.18 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.18.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.18.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.18.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.19.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.19.yaml new file mode 100644 index 00000000..473357ad --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.19.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.19 - 1.21" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.19 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.19.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.19.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.19 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.19.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.19.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.19.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.9.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.9.yaml new file mode 100644 index 00000000..d46b8d09 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/extauth_1.9.yaml @@ -0,0 +1,385 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.9 - 1.11" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-1.9 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: pod +spec: + workloadSelector: + labels: + {{ include "product.name" . }}.io/envoy-auth-config: pod + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.9.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24226 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24226_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: CLUSTER + match: + context: ANY + cluster: + name: "inbound-ext-authz" + proxy: + proxyVersion: ^1\.9.* + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STRICT_DNS + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "{{ include "controller.fullname" . }}.{{ .Release.Namespace }}" + port_value: 24229 + {{- if and .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) }} + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + sni: outbound_.24229_._.{{ include "controller.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + common_tls_context: + alpn_protocols: + - "istio-peer-exchange" + - "istio" + - "h2" + tls_certificate_sds_secret_configs: + - name: default + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + combined_validation_context: + default_validation_context: + match_subject_alt_names: + - exact: spiffe://cluster.local/ns/{{ .Release.Namespace }}/sa/{{ include "controller.serviceAccountName" . 
}} + validation_context_sds_secret_config: + name: "ROOTCA" + sds_config: + api_config_source: + api_type: "GRPC" + grpc_services: + - envoy_grpc: + cluster_name: "sds-grpc" + set_node_on_first_message_only: true + transport_api_version: V3 + initial_fetch_timeout: 0s + resource_api_version: V3 + {{- end }} + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ 
.Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- if .Values.istio.expansion.enabled -}} +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: ext-authz-expansion-1.9 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} + workload-type: expansion +spec: + workloadSelector: + labels: + {{ include "product.name" . 
}}.io/envoy-auth-config: expansion + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.9.* + context: ANY + cluster: + name: "outbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "outbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "outbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24226 + - applyTo: CLUSTER + match: + proxy: + proxyVersion: ^1\.9.* + context: ANY + cluster: + name: "inbound-ext-authz" + patch: + operation: ADD + value: # cluster specification + name: "inbound-ext-authz" + type: STATIC + http2_protocol_options: {} + connect_timeout: 10s + load_assignment: + cluster_name: "inbound-ext-authz" + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: "127.0.0.1" + port_value: 24229 + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_OUTBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + 
include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: outbound-ext-authz + - applyTo: NETWORK_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.network.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.network.ext_authz.v3.ExtAuthz" + stat_prefix: ext_authz + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz + - applyTo: HTTP_FILTER + match: + proxy: + proxyVersion: ^1\.9.* + context: SIDECAR_INBOUND + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: "envoy.filters.http.ext_authz" + typed_config: + "@type": "type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz" + transport_api_version: V3 + include_peer_certificate: true + failure_mode_allow: {{ .Values.global.connectionFailPolicyAllow }} + grpc_service: + envoy_grpc: + cluster_name: inbound-ext-authz +{{- end }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.10.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.10.yaml new file mode 100644 index 00000000..9e59cb8e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.10.yaml @@ -0,0 +1,36 @@ +{{- if 
.Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.10 - 1.12" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.10 + namespace: istio-system + labels: + owner: {{ include "product.name" . }} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.10.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.11.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.11.yaml new file mode 100644 index 00000000..37e80725 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.11.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.11 - 1.11" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.11 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.11.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.12.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.12.yaml new file mode 100644 index 00000000..f78fc031 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.12.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.12 - 1.14" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.12 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.12.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.13.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.13.yaml new file mode 100644 index 00000000..f304926d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.13.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.13 - 1.16" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.13 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.13.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.14.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.14.yaml new file mode 100644 index 00000000..ca7c742a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.14.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.14 - 1.16" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.14 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.14.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.15.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.15.yaml new file mode 100644 index 00000000..ce1e15bb --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.15.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.15 - 1.17" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.15 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.15.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.16.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.16.yaml new file mode 100644 index 00000000..79ffc416 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.16.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.16 - 1.18" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.16 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.16.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.17.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.17.yaml new file mode 100644 index 00000000..df1ac658 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.17.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.17 - 1.19" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.17 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.17.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.18.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.18.yaml new file mode 100644 index 00000000..152562c8 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.18.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.18 - 1.20" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.18 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.18.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.19.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.19.yaml new file mode 100644 index 00000000..7adbb1a6 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.19.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.19 - 1.21" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.19 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.19.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.9.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.9.yaml new file mode 100644 index 00000000..9df858a5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/passthrough_1.9.yaml @@ -0,0 +1,36 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.9 - 1.11" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: passthrough-cluster-tls-1.9 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: CLUSTER + match: + proxy: + proxyVersion: '^1\.9.*' + context: ANY + cluster: + name: "PassthroughClusterTLS" + patch: + operation: ADD + value: # cluster specification + name: PassthroughClusterTLS + connect_timeout: 10s + type: ORIGINAL_DST + lb_policy: CLUSTER_PROVIDED + protocol_selection: "USE_DOWNSTREAM_PROTOCOL" + transport_socket: + name: envoy.transport_sockets.tls + typed_config: + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext + common_tls_context: + validation_context: + trusted_ca: + filename: /etc/ssl/certs/ca-certificates.crt +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/peerauth.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/peerauth.yaml new file mode 100644 index 00000000..dd235834 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/peerauth.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.global.isConnectionEnforcementEnabled .Values.global.isAPISecurityEnabled (not .Values.istio.global.alreadyInstalled) -}} +apiVersion: "security.istio.io/v1beta1" +kind: "PeerAuthentication" +metadata: + name: {{ include "product.name" . 
}}-mtls-pa + namespace: {{ .Release.Namespace }} +spec: + mtls: + mode: STRICT +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/post-delete-job.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/post-delete-job.yaml new file mode 100644 index 00000000..8fc79719 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/post-delete-job.yaml @@ -0,0 +1,153 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "controller.fullname" . }}-istio-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +data: + uninstall.sh: |- + #!/bin/sh -x + + kubectl delete destinationrules.networking.istio.io --all-namespaces -l owner={{ include "product.name" . }} --ignore-not-found + kubectl delete peerauthentications.security.istio.io --all-namespaces -l owner={{ include "product.name" . }} --ignore-not-found + kubectl delete serviceentries.networking.istio.io --all-namespaces -l owner={{ include "product.name" . }} --ignore-not-found + kubectl delete virtualservices.networking.istio.io --all-namespaces -l owner={{ include "product.name" . }} --ignore-not-found + kubectl delete envoyfilter.networking.istio.io --all-namespaces -l owner={{ include "product.name" . 
}} --ignore-not-found + kubectl delete secrets --all-namespaces --field-selector type=istio.io/custom-id --ignore-not-found + kubectl delete secrets --all-namespaces --field-selector type=istio.io/key-and-cert --ignore-not-found + + function refresh_all_pods() { + kubectl -n $1 rollout restart deployments + kubectl -n $1 rollout restart daemonsets + kubectl -n $1 rollout restart statefulsets + } + + function refresh_all_connections_only_protected_namespaces() { + PROTECTED_NAMESPACE_LIST=$(kubectl get ns -l SecureApplication-protected=connections-only -o jsonpath='{.items[*].metadata.name}') + echo "Refreshing pods in all connections-only protected namespaces" + for namespace_name in $PROTECTED_NAMESPACE_LIST ; do + refresh_all_pods $namespace_name + done + } + + function refresh_all_full_protected_namespaces() { + PROTECTED_NAMESPACE_LIST=$(kubectl get ns -l SecureApplication-protected=full -o jsonpath='{.items[*].metadata.name}') + echo "Refreshing pods in all full protected namespaces" + for namespace_name in $PROTECTED_NAMESPACE_LIST ; do + refresh_all_pods $namespace_name + done + } + + function refresh_all_protected_namespaces() { + refresh_all_connections_only_protected_namespaces + refresh_all_full_protected_namespaces + } + + refresh_all_protected_namespaces +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "controller.fullname" . }}-istio-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["list", "delete"] + - apiGroups: ["networking.istio.io"] + resources: ["destinationrules", "serviceentries", "virtualservices", "envoyfilters"] + verbs: ["list", "delete"] + - apiGroups: ["security.istio.io"] + resources: ["peerauthentications"] + verbs: ["list", "delete"] + - apiGroups: ["apps"] + resources: ["statefulsets", "daemonsets", "deployments"] + verbs: ["list", "patch"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["list"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "controller.fullname" . }}-istio-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "controller.fullname" . }}-istio-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "controller.fullname" . }}-istio-post-delete +subjects: + - kind: ServiceAccount + name: {{ include "controller.fullname" . }}-istio-post-delete + namespace: {{ .Release.Namespace }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "controller.fullname" . }}-istio-post-delete + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} + annotations: + helm.sh/hook: post-delete + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed +spec: + backoffLimit: 0 + template: + metadata: + name: {{ include "controller.fullname" . }}-istio-post-delete + spec: + restartPolicy: Never + serviceAccountName: {{ include "controller.fullname" . }}-istio-post-delete + securityContext: + runAsNonRoot: true + runAsUser: 65532 + fsGroup: 65532 + containers: + - name: kubectl + image: "{{ default .Values.global.registry .Values.kubectl.image.registry }}/{{ .Values.kubectl.image.repository }}:{{ .Values.kubectl.image.tag }}" + imagePullPolicy: {{ .Values.kubectl.image.pullPolicy }} + command: [ "/bin/sh", "-c", "/opt/uninstall.sh" ] + volumeMounts: + - name: uninstall-script + mountPath: /opt/uninstall.sh + subPath: uninstall.sh + volumes: + - name: uninstall-script + configMap: + name: {{ include "controller.fullname" . }}-istio-post-delete + items: + - key: uninstall.sh + path: uninstall.sh + mode: 0777 + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/sidecar.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/sidecar.yaml new file mode 100644 index 00000000..4817db94 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/sidecar.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.global.isConnectionEnforcementEnabled .Values.istio.global.serviceDiscoveryIsolationEnabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: Sidecar +metadata: + name: default + namespace: istio-system +spec: + egress: + - hosts: + - "./*" + - "istio-system/*" + - "{{ .Release.Namespace }}/*" +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.10.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.10.yaml new file mode 100644 index 00000000..8fabf8b1 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.10.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.10 - 1.12" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.10 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.10.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.10 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.10.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.11.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.11.yaml new file mode 100644 index 00000000..180d5c29 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.11.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.11 - 1.13" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.11 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.11.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.11 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.11.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.12.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.12.yaml new file mode 100644 index 00000000..1065bd7f --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.12.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.12 - 1.14" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.12 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.12.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.12 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.12.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.13.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.13.yaml new file mode 100644 index 00000000..684dccc9 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.13.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.13 - 1.16" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.13 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.13.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.13 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.13.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.14.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.14.yaml new file mode 100644 index 00000000..c6541d84 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.14.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.14 - 1.16" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.14 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.14.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.14 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.14.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.15.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.15.yaml new file mode 100644 index 00000000..e6223e89 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.15.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.15 - 1.17" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.15 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.15.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.15 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.15.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.16.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.16.yaml new file mode 100644 index 00000000..7cbcf84a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.16.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.16 - 1.18" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.16 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.16.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.16 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.16.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.17.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.17.yaml new file mode 100644 index 00000000..93897630 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.17.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.17 - 1.19" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.17 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.17.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.17 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.17.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.18.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.18.yaml new file mode 100644 index 00000000..7f133cc4 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.18.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.18 - 1.20" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.18 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.18.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.18 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.18.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.19.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.19.yaml new file mode 100644 index 00000000..85293350 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.19.yaml @@ -0,0 +1,83 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.19 - 1.21" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.19 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.19.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.19 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + priority: -1 + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.19.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.9.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.9.yaml new file mode 100644 index 00000000..e7fedbbe --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/istio/stackdriver_1.9.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.isConnectionEnforcementEnabled -}} +{{- if semverCompare "1.9 - 1.11" .Values.istio.global.version -}} +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: stackdriver-filter-1.9 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: HTTP_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.9.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.http_connection_manager" + subFilter: + name: "envoy.filters.http.router" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +--- +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: tcp-stackdriver-filter-1.9 + namespace: istio-system + labels: + owner: {{ include "product.name" . 
}} +spec: + configPatches: + - applyTo: NETWORK_FILTER + match: + context: SIDECAR_INBOUND + proxy: + proxyVersion: '^1\.9.*' + listener: + filterChain: + filter: + name: "envoy.filters.network.tcp_proxy" + patch: + operation: INSERT_BEFORE + value: + name: istio.stackdriver + typed_config: + "@type": type.googleapis.com/udpa.type.v1.TypedStruct + type_url: type.googleapis.com/envoy.extensions.filters.network.wasm.v3.Wasm + value: + config: + root_id: stackdriver_inbound + configuration: + "@type": "type.googleapis.com/google.protobuf.StringValue" + value: | + {"disable_host_header_fallback":false,"disable_server_access_logging":false,"enable_mesh_edges_reporting":false} + vm_config: + vm_id: stackdriver_inbound + runtime: envoy.wasm.runtime.null + code: + local: { inline_string: envoy.wasm.null.stackdriver } +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/seccomp-installer/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/seccomp-installer/_helpers.tpl new file mode 100644 index 00000000..588e9151 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/seccomp-installer/_helpers.tpl @@ -0,0 +1,45 @@ +{{- define "seccomp-installer.name" -}} +{{- "seccomp-installer" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +*/}} +{{- define "seccomp-installer.fullname" -}} +{{- "seccomp-installer" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "seccomp-installer.labels" -}} +helm.sh/chart: {{ include "panoptica.chart" . }} +{{ include "seccomp-installer.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.global.extraLabels }} +{{ toYaml $.Values.global.extraLabels }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "seccomp-installer.selectorLabels" -}} +app: {{ include "seccomp-installer.name" . }} +app.kubernetes.io/name: {{ include "seccomp-installer.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "seccomp-installer.serviceAccountName" -}} +{{- if .Values.seccompInstaller.serviceAccount.create }} +{{- default (include "seccomp-installer.fullname" .) .Values.seccompInstaller.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.seccompInstaller.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/seccomp-installer/serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/seccomp-installer/serviceaccount.yaml new file mode 100644 index 00000000..35d940f1 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/seccomp-installer/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.global.isContainerSecurityEnabled .Values.seccompInstaller.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: seccomp-installer + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} + {{- with .Values.seccompInstaller.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/clusterrolebinding.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/clusterrolebinding.yaml new file mode 100644 index 00000000..71c309cf --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if not .Values.global.isManagedByHelm -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "product.name" . }}-upgrader + labels: + {{- include "controller.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: {{ include "product.name" . }}-upgrader + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/serviceaccount.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/serviceaccount.yaml new file mode 100644 index 00000000..92b223b5 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if not .Values.global.isManagedByHelm -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "product.name" . }}-upgrader + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . 
| nindent 4 }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/upgrader-template-configmap.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/upgrader-template-configmap.yaml new file mode 100644 index 00000000..676e9eef --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/templates/upgrader/upgrader-template-configmap.yaml @@ -0,0 +1,72 @@ +{{- if not .Values.global.isManagedByHelm }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: upgrader-job-template + namespace: {{ .Release.Namespace }} + labels: + {{- include "controller.labels" . | nindent 4 }} +data: + config: |- + apiVersion: batch/v1 + kind: Job + metadata: + name: upgrader + namespace: {{ .Release.Namespace }} + labels: + app: upgrader + spec: + activeDeadlineSeconds: 1200 + ttlSecondsAfterFinished: 30 + backoffLimit: 0 + template: + spec: + restartPolicy: Never + serviceAccountName: {{ include "product.name" . }}-upgrader + containers: + - name: upgrader + imagePullPolicy: {{ .Values.upgrader.image.pullPolicy }} + args: + - run + - --log-level + - {{ .Values.controller.logLevel }} + env: + - name: AGENT_ID + value: {{ required "Agent ID is required!" .Values.controller.agentID | quote }} + - name: MANAGEMENT_HOST + value: {{ .Values.global.mgmtHostname | quote }} + - name: SHARED_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.controller.secret.existingSecret | default (include "controller.fullname" .) 
}} + key: {{ .Values.controller.secret.sharedSecretKey | default "SHARED_SECRET" }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: XDG_CACHE_HOME + value: /tmp + - name: SCRIPT_API_VERSION + value: 1 + securityContext: + {{- toYaml .Values.upgrader.securityContext | nindent 16 }} + resources: + {{- toYaml .Values.upgrader.resources | nindent 16 }} + volumeMounts: + - mountPath: /tmp + name: tmp-volume + volumes: + - name: tmp-volume + emptyDir: {} + securityContext: + {{- toYaml .Values.upgrader.podSecurityContext | nindent 12 }} + {{- with .Values.upgrader.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.upgrader.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/values.yaml new file mode 100644 index 00000000..6b09bb30 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/charts/panoptica/values.yaml @@ -0,0 +1,516 @@ +global: + # -- Override product name. Defaults to chart name. + productNameOverride: "portshift" + # -- Override detected cluster version. + kubeVersionOverride: "" + # @ignored + isManagedByHelm: true + # @ignored - used for helm lint testing to skip istio/vault chart verification + skipCrossChartsVerification: false + # -- Panoptica SaaS URL. Used to override default URL for local testing. + mgmtHostname: "" + # -- Indicates whether kubernetes security is enabled. + isContainerSecurityEnabled: true + # -- Indicates whether K8s CIS benchmark is enabled. + k8sCisBenchmarkEnabled: true + # -- Indicates whether SSH monitoring is enabled. 
+ isSSHMonitorEnabled: true + # -- Indicates whether Deployer Policy enforcement is enabled. + validateDeployerPolicy: false + # -- Indicates whether K8s Events monitoring is enabled. + k8sEventsEnabled: true + # -- Indicates whether API security is enabled. + isAPISecurityEnabled: false + # -- Indicates whether connection enforcement is enabled. If true, make sure istio is installed by using panoptica istio chart or an upstream istio is already installed. + isConnectionEnforcementEnabled: false + # -- Indicates whether token injection feature is enabled. If true, make sure vault is installed by using panoptica vault chart. + tokenInjectionEnabled: false + # -- Indicates whether istio should provision workload certificates using a custom certificate authority that integrates with the Kubernetes CSR API. + isExternalCaEnabled: false + # -- Indicates whether installed in an OpenShift environment. + isOpenShift: false + # -- Proxy address to use for HTTP request if needed. + httpProxy: "" + # -- Proxy address to use for HTTPs request if needed. In most cases, this is the same as `httpProxy`. + httpsProxy: "" + # -- Registry for the Panoptica images. If replaced with a local registry need to make sure all images are pulled into the local registry. + registry: "gcr.io/eticloud/k8sec" + # -- Configures telemetry frequency (in seconds) for reporting duration. + sendTelemetriesIntervalSec: 30 + # -- If false, connections on protected namespaces will be blocked if the controller is not responding. + connectionFailPolicyAllow: true + # -- If false, pods creation on protected namespaces will be blocked if the controller is not responding. + environmentFailurePolicyAllow: true + # -- Indicates whether TLS inspection is enabled. If true, the controller will be able to decrypt and re-encrypt HTTPS traffic for connections to be inspected via layer 7 attributes. + enableTlsInspection: false + # -- Placeholder used for tests. 
+ dummyPlaceHolderForTest: false + # -- Indicates whether auto label is enabled. If true, new namespaces will be labeled with the protection label. + autoLabelEnabled: false + # -- Indicates whether to identity pods whose templates originated from the Panoptica CD plugin. See `CD Pod template` section in https://panoptica.readme.io/docs/deploy-on-a-kubernetes-cluster for more info. + cdValidation: false + # -- Indicates whether to identity pods only if all images are signed by a trusted signer. See https://panoptica.readme.io/docs/trusted-signers for more info. + ciImageSignatureValidation: false + # -- Indicates whether to identity pods only if all image hashes are known to Panoptica. See `CI image hash validation` section in https://panoptica.readme.io/docs/deploy-on-a-kubernetes-cluster for more info. + ciImageValidation: false + # -- Indicates whether to identity pods only if all images are pulled from trusted registries. See `Restrict Registries` section in https://panoptica.readme.io/docs/deploy-on-a-kubernetes-cluster for more info. + restrictRegistries: false + # -- Indicates whether the controller should preserve the original source ip of inbound connections. + preserveOriginalSourceIp: false + # -- Allow labelling resources with custom key/value pairs. + extraLabels: {} +# key: value + +controller: + nameOverride: "portshift-agent" + fullnameOverride: "portshift-agent" + + # -- Configure controller replica count number in case autoscaling is disabled. + replicaCount: 1 + + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: k8s_agent + tag: fdb16f4d5c28fef6538d01b07ed2520bc9253809 + pullPolicy: IfNotPresent + + imagePullSecrets: [] + + # -- Logging level (debug, info, warning, error, fatal, panic). + logLevel: warning + + secret: + # -- [Required if controller.existingSecret isn't set] Shared secret used by the controller to communicate with the SaaS, should be extracted from SaaS after cluster creation. 
+ sharedSecret: "" + # -- Existing secret that contains shared secret used by the controller to communicate with the SaaS. + existingSecret: "" + # -- Shared secret key is the key of the shared secret, default: SHARED_SECRET. + sharedSecretKey: "" + + # -- [Required] Controller identification, should be extracted from SaaS after cluster creation. + agentID: "" + + serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Annotations to add to the service account + annotations: {} + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + + podSecurityContext: + fsGroup: 1001 + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + service: + type: ClusterIP + + resources: + requests: + cpu: 500m + memory: 2048Mi + + autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + pdb: + create: true + minAvailable: 1 + # maxUnavailable: 1 + + # -- Node labels for controller pod assignment + nodeSelector: + kubernetes.io/os: linux + + tolerations: [] + + affinity: {} + + persistence: + # -- Enable persistence using Persistent Volume Claims + enabled: false + pvcSuffix: pvc-fdb16f4d5c28fef6538d01b07ed2520bc9253809 + accessMode: "ReadWriteOnce" + # -- The storage space that should be claimed from the persistent volume + size: 100Mi + # -- If defined, storageClassName will be set to the defined storageClass. + # If set to "-", storageClassName will be set to an empty string (""), which disables dynamic provisioning. + # If undefined or set to null (the default), no storageClassName spec is set, + # choosing 'standard' storage class available with the default provisioner (gcd-pd on GKE, hostpath on minikube, etc). 
+ storageClass: + +apiclarity: + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: apiclarity + tag: 9a09d167c27046e6d76a96e6e4f248f166b9fc8f + pullPolicy: IfNotPresent + + imagePullSecrets: [] + + # -- Logging level (debug, info, warning, error, fatal, panic). + logLevel: warning + + traceWasmFilterSHA256: 5f48a298d47422f6fb8e03b5c856fae5c4aaab60b8b9e9f28a13ca34d22bf0b7 + + traceSource: + # -- Indicates whether istio supply traces. + istio: false + # -- Indicates whether external GWs supply traces. + external: false + + serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Annotations to add to the service account + annotations: {} + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + + podSecurityContext: + fsGroup: 1000 + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1000 + runAsUser: 1000 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 100m + memory: 200Mi + + # -- Node labels for pod assignment + nodeSelector: + kubernetes.io/os: linux + + tolerations: [] + + affinity: {} + + persistence: + accessMode: "ReadWriteOnce" + # -- The storage space that should be claimed from the persistent volume + size: 100Mi + # -- If defined, storageClassName will be set to the defined storageClass. + # If set to "-", storageClassName will be set to an empty string (""), which disables dynamic provisioning. + # If undefined or set to null (the default), no storageClassName spec is set, + # choosing 'standard' storage class available with the default provisioner (gcd-pd on GKE, hostpath on minikube, etc). + storageClass: + + fuzzer: + # Indicates whether API security fuzz test is enabled. 
+ enabled: false + + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: scn-dast + tag: b0e698ea50aa701d22a1f8fbe549d45c340e0b91 + pullPolicy: Always + + # -- Configure fuzzer labels + labels: + app: fuzzer + + debug: false + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + resources: + requests: + memory: "200Mi" + cpu: "100m" + limits: + memory: "1000Mi" + cpu: "200m" + + # -- Node labels for pod assignment + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + +apiclarity-postgresql: + image: + # -- Image registry, must be set to override the dependency registry. + registry: gcr.io/eticloud/k8sec + repository: bitnami/postgresql + tag: 14.4.0-debian-11-r4 + pullPolicy: IfNotPresent + + ## initdb parameters + # initdb: + ## ConfigMap with scripts to be run at first boot + ## NOTE: This will override initdb.scripts + # scriptsConfigMap + ## Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along initdbScripts or initdbScriptsConfigMap + # scriptsSecret: + ## Specify the PostgreSQL username and password to execute the initdb scripts + # user: + # password: + + ## Setup database name and password + auth: + existingSecret: apiclarity-postgresql-secret + database: apiclarity + + ## Enable security context + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + + ## By default, postgresql full name includes the release name, we don't want it. + fullnameOverride: "apiclarity-postgresql" + +istio: + global: + # -- Indicates whether istio is already installed and not by Panoptica charts. + alreadyInstalled: false + # -- Indicates what version of istio is running, change only if `alreadyInstalled` is set to true. 
+ version: "1.19.0" + # Indicates whether istio service information will be synced only to proxies in the same namespace. + serviceDiscoveryIsolationEnabled: false + + expansion: + enabled: false + +kafkaAuthzInjector: + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: kafka-authz + tag: e647ba66cf10897ee6e07a3d6d81b2148d0a47be + pullPolicy: Always + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + +dnsDetector: + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: gopassivedns + tag: 0c7330b51a07cdebe13e57b1d1a33134cbbe04ce + pullPolicy: IfNotPresent + + resources: + requests: + memory: 50Mi + cpu: 20m + limits: + memory: 100Mi + cpu: 200m + +seccompInstaller: + serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Annotations to add to the service account + annotations: {} + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +k8sCISBenchmark: + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: k8s-cis-benchmark + tag: f5b0490258b1cb87ce6eddc2a3083482135dcf5c + + # -- Node labels for pod assignment + nodeSelector: + kubernetes.io/os: linux + +imageAnalysis: + # -- Scanner jobs namespace. + # If left blank, the scanner jobs will run in release namespace. 
+ # If set, the scanner jobs will run in the given namespace unless the image requires image pull secrets which are located in a target pod + jobDefaultNamespace: "" + + registry: + skipVerifyTlS: "false" + useHTTP: "false" + + # -- Node labels for controller pod assignment + nodeSelector: + kubernetes.io/os: linux + + tolerations: [] + + sbom: + enabled: true + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: image-analyzer + tag: 5f969c4535b52368ff7e288f6c9a2ce8bea019b0 + + podSecurityContext: + fsGroup: 1001 + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "2000Mi" + cpu: "1000m" + + cisDockerBenchmark: + enabled: false + + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: cis-docker-benchmark + tag: a281d02d480ba3fc815d176731fa9412fe872ad3 + + podSecurityContext: + fsGroup: 1001 + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + resources: + requests: + memory: "50Mi" + cpu: "50m" + limits: + memory: "1000Mi" + cpu: "1000m" + + # -- The max number of scanner jobs that will run in the cluster in parallel for image analysis in total + parallelScanners: 4 + +kubectl: + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: kubectl + tag: v1.27.1 + pullPolicy: IfNotPresent + +busybox: + image: + # -- Image registry, used to override global.registry if needed. 
+ registry: "" + repository: curlimages/curl + tag: latest + pullPolicy: IfNotPresent + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + +upgrader: + image: + pullPolicy: Always + + podSecurityContext: + fsGroup: 1001 + + securityContext: + capabilities: + drop: + - ALL + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + + resources: + requests: + memory: 50Mi + cpu: 50m + limits: + memory: 1000Mi + cpu: 1000m + + # -- Node labels for pod assignment + nodeSelector: + kubernetes.io/os: linux + + tolerations: [] + +vaultEnv: + image: + # -- Image registry, used to override global.registry if needed. + registry: "" + repository: bank-vaults/vault-env + tag: v1.21.0 diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/templates/NOTES.txt b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/templates/NOTES.txt new file mode 100644 index 00000000..68e8ce8a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/templates/NOTES.txt @@ -0,0 +1,5 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/templates/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/templates/_helpers.tpl new file mode 100644 index 00000000..ebc5fe52 --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "appdynamics-security-collector.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "appdynamics-security-collector.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "appdynamics-security-collector.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "appdynamics-security-collector.labels" -}} +helm.sh/chart: {{ include "appdynamics-security-collector.chart" . }} +{{ include "appdynamics-security-collector.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "appdynamics-security-collector.selectorLabels" -}} +app.kubernetes.io/name: {{ include "appdynamics-security-collector.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "appdynamics-security-collector.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "appdynamics-security-collector.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/values.yaml new file mode 100644 index 00000000..14bf7e61 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/charts/appdynamics-security-collector/values.yaml @@ -0,0 +1,7 @@ +panoptica: + k8sEventsEnabled: false + isContainerSecurityEnabled: false + controller: + agentID: "" + secret: + sharedSecret: "" diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/ci/appdynamics-collectors-values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/ci/appdynamics-collectors-values.yaml new file mode 100644 index 00000000..3ae55f6a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/ci/appdynamics-collectors-values.yaml @@ -0,0 +1,8 @@ +global: + clusterName: test + +appdynamics-otel-collector: + client_id: client-id-test + client_secret: client-secret-test + token_url: token-url-test + endpoint: endpoint-test \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/certs.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/certs.yaml new file mode 100644 index 00000000..cbce506a --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/certs.yaml @@ -0,0 +1,59 @@ 
+apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: my-selfsigned-ca + namespace: appdynamics +spec: + isCA: true + commonName: my-selfsigned-ca + secretName: root-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: ca-cert + namespace: appdynamics +spec: + ca: + secretName: root-secret +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: server-cert + namespace: appdynamics +spec: + secretName: server-secret + issuerRef: + name: ca-cert + kind: Issuer + commonName: clustermon-service + dnsNames: + - appdynamics-otel-collector-service + - appdynamics-otel-collector-service.appdynamics.svc.cluster.local +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: client-cert + namespace: appdynamics +spec: + secretName: client-secret + issuerRef: + name: ca-cert + kind: Issuer + commonName: client \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/complex-and-or.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/complex-and-or.yaml new file mode 100644 index 00000000..3f9d6f84 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/complex-and-or.yaml @@ -0,0 +1,43 @@ +global: + clusterName: "ClusterName" +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: true + logCollectorConfig: + container: + - condition: + or: + - equals: + kubernetes.container.name: log-gen-app-log4j-win + - equals: + kubernetes.container.name: log-gen-app-log4j-win-test + config: + multiLinePattern: 
'^\d{4}-\d{2}-\d{2}' + multiLineNegate: true + multiLineMatch: after + messageParser: + log4J: + enabled: true + pattern: "%d{yyyy-MM-dd'T'HH:mm:ss} %p %C{1.} [%t] %m%n" + - condition: + and: + - equals: + kubernetes.container.name: log-gen-app-log4j-win + - equals: + kubernetes.container.name: log-gen-app-log4j + config: + multiLinePattern: '^\d{4}-\d{2}-\d{2}' + multiLineNegate: true + multiLineMatch: after + messageParser: + log4J: + enabled: true + pattern: "%d{yyyy-MM-dd'T'HH:mm:ss} %p %C{1.} [%t] %m%n" + +appdynamics-otel-collector: + clientId: + clientSecret: + endpoint: + tokenUrl: \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/complex.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/complex.yaml new file mode 100644 index 00000000..5f4cb9f1 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/complex.yaml @@ -0,0 +1,333 @@ +global: + clusterName: "ClusterName" +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: true + logCollectorConfig: + filebeatYaml: |- + filebeat.autodiscover: + providers: + - type: kubernetes + node: ${NODE_NAME} + labels.dedot: false + annotations.dedot: false + hints.enabled: true + hints.default_config.enabled: false + templates: + - condition: + equals: + kubernetes.container.name: log-gen-app-log4j + config: + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + parsers: + - container: + stream: all + format: auto + - multiline: + type: pattern + pattern: '^\d{4}-\d{2}-\d{2}' + negate: true + match: after + prospector.scanner.symlinks: true + processors: + - copy_fields: + fields: + - 
from: kubernetes.pod.name + to: fields.k8s.pod.name + fail_on_error: false + ignore_missing: true + - copy_fields: + fields: + - from: kubernetes.deployment.name + to: fields.k8s.workload.name + fail_on_error: false + ignore_missing: true + - add_fields: + target: _message_parser + fields: + type: log4j + pattern: "%d{yyyy-MM-dd'T'HH:mm:ss} %p %C{1.} [%t] %m%n" + - condition: + equals: + kubernetes.container.name: log-gen-app-log4j2 + config: + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + parsers: + - container: + stream: all + format: auto + - multiline: + type: pattern + pattern: '^\d{4}-\d{2}-\d{2}' + negate: true + match: after + prospector.scanner.symlinks: true + processors: + - copy_fields: + fields: + - from: kubernetes.pod.name + to: fields.k8s.pod.name + fail_on_error: false + ignore_missing: true + - copy_fields: + fields: + - from: kubernetes.deployment.name + to: fields.k8s.workload.name + fail_on_error: false + ignore_missing: true + - add_fields: + target: _message_parser + fields: + type: log4j + pattern: "%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n" + - condition: + equals: + kubernetes.container.name: log-gen-app-logback + config: + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + parsers: + - container: + stream: all + format: auto + - multiline: + type: pattern + pattern: '^\d{4}-\d{2}-\d{2}' + negate: true + match: after + prospector.scanner.symlinks: true + processors: + - copy_fields: + fields: + - from: kubernetes.pod.name + to: fields.k8s.pod.name + fail_on_error: false + ignore_missing: true + - copy_fields: + fields: + - from: kubernetes.deployment.name + to: 
fields.k8s.workload.name + fail_on_error: false + ignore_missing: true + - add_fields: + target: _message_parser + fields: + type: logback + pattern: "%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n" + - condition: + equals: + kubernetes.container.name: log-gen-app-grok + config: + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + parsers: + - container: + stream: all + format: auto + - multiline: + type: pattern + pattern: '^\d{4}-\d{2}-\d{2}' + negate: true + match: after + prospector.scanner.symlinks: true + processors: + - copy_fields: + fields: + - from: kubernetes.pod.name + to: fields.k8s.pod.name + fail_on_error: false + ignore_missing: true + - copy_fields: + fields: + - from: kubernetes.deployment.name + to: fields.k8s.workload.name + fail_on_error: false + ignore_missing: true + - add_fields: + target: _message_parser + fields: + type: grok + pattern: + - '%{DATESTAMP:time} %{LOGLEVEL:severity} %{WORD:class}:%{NUMBER:line} - %{GREEDYDATA:data}' + - '%{DATESTAMP_RFC2822:time} %{LOGLEVEL:severity} %{GREEDYDATA:data}' + - '%{TOMCAT_DATESTAMP:time} \| %{LOGLEVEL:level} \| %{JAVACLASS:class} - %{JAVALOGMESSAGE:logmessage}' + - '%{IP:clientIP} %{WORD:httpMethod} %{URIPATH:url}' + timestamp_field: time + timestamp_format: "yyyy-MM-dd HH:mm:ss,SSS" + - condition: + equals: + kubernetes.container.name: log-gen-app-json + config: + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + parsers: + - container: + stream: all + format: auto + - multiline: + type: pattern + pattern: '^{' + negate: true + match: after + prospector.scanner.symlinks: true + processors: + - copy_fields: + 
fields: + - from: kubernetes.pod.name + to: fields.k8s.pod.name + fail_on_error: false + ignore_missing: true + - copy_fields: + fields: + - from: kubernetes.deployment.name + to: fields.k8s.workload.name + fail_on_error: false + ignore_missing: true + - add_fields: + target: _message_parser + fields: + type: json + timestamp_field: "@timestamp" + timestamp_pattern: "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'" + - condition: + equals: + kubernetes.container.name: kube-proxy + config: + - type: filestream + id: fsid-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + close_removed: false + clean_removed: false + paths: + - /var/log/containers/${data.kubernetes.pod.name}*${data.kubernetes.container.id}.log + parsers: + - container: + stream: all + format: auto + - multiline: + type: pattern + pattern: '^[a-z]|^[A-Z]' + negate: true + match: after + prospector.scanner.symlinks: true + processors: + - copy_fields: + fields: + - from: kubernetes.pod.name + to: fields.k8s.pod.name + fail_on_error: false + ignore_missing: true + - copy_fields: + fields: + - from: kubernetes.deployment.name + to: fields.k8s.workload.name + fail_on_error: false + ignore_missing: true + - add_fields: + target: _message_parser + fields: + type: infra + processors: + - add_cloud_metadata: ~ + - add_kubernetes_metadata: + in_cluster: true + host: ${NODE_NAME} + matchers: + - logs_path: + logs_path: "/var/log/containers/" + - rename: + fields: + - from: "kubernetes.namespace" + to: "kubernetes.namespace.name" + - from: "kubernetes" + to: "k8s" + - from: k8s.annotations.appdynamics.lca/filebeat.parser + to: "_message_parser" + - from: "cloud.instance.id" + to: "host.id" + ignore_missing: true + fail_on_error: false + - add_fields: + target: source + fields: + name: log-agent + - add_fields: + target: telemetry + fields: + sdk.name: log-agent + - script: + lang: javascript + source: > + function process(event) { + var podUID = event.Get("k8s.pod.uid"); + if (podUID) { + 
event.Put("internal.container.encapsulating_object_id", "ClusterName:" + podUID); + } + return event; + } + - drop_fields: + fields: ["agent", "stream", "ecs", "input", "orchestrator", "k8s.annotations.appdynamics", "k8s.labels", "k8s.node.labels", "cloud"] + ignore_missing: true + output.otlploggrpc: + groupby_resource_fields: + - k8s + - source + - host + - container + - log + - telemetry + - internal + - os + hosts: ["${APPD_OTELCOL_GRPC_RECEIVER_HOST}:14317"] + worker: 1 + max_bytes: 1000000 + ssl.enabled: false + ssl.supported_protocols: [TLSv1.3] + wait_for_ready: true + batch_size: 1000 + summary_debug_logs_interval: 10s + filebeat.registry.path: registry1 + filebeat.registry.file_permissions: 0640 + path.data: /opt/appdynamics/logcollector-agent/data + logging: + level: info + to_files: false + files: + path: /opt/appdynamics/logcollector-agent/log + name: lca-log + keepfiles: 5 + permissions: 0640 + selectors: [] + metrics: + enabled: false + period: 30s + monitoring: + enabled: false +appdynamics-otel-collector: + clientId: + clientSecret: + endpoint: + tokenUrl: \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/minimal-config.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/minimal-config.yaml new file mode 100644 index 00000000..c474de8d --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/minimal-config.yaml @@ -0,0 +1,27 @@ +global: + clusterName: "ClusterName" +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: true + logCollectorConfig: + container: + conditionalConfigs: + - condition: + operator: equals + key: kubernetes.container.name + value: + config: + multiLinePattern: '^\d{4}-\d{2}-\d{2}' + multiLineMatch: "after" + messageParser: + log4J: + enabled: true + pattern: 
"%d{yyyy-MM-dd'T'HH:mm:ss} %p %C{1.} [%t] %m%n" + +appdynamics-otel-collector: + clientId: + clientSecret: + endpoint: + tokenUrl: diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/minimal-default-config.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/minimal-default-config.yaml new file mode 100644 index 00000000..7a00064e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/minimal-default-config.yaml @@ -0,0 +1,34 @@ +global: + clusterName: "ClusterName" +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: true + logCollectorConfig: + container: + defaultConfig: + multiLinePattern: "^{" + multiLineMatch: "after" + multiLineNegate: true + messageParser: + json: + enabled: true + conditionalConfigs: + - condition: + operator: equals + key: kubernetes.container.name + value: log-gen-app-log4j + config: + multiLinePattern: '^\d{4}-\d{2}-\d{2}' + multiLineNegate: true + multiLineMatch: "after" + messageParser: + log4J: + enabled: true + pattern: "%d{yyyy-MM-dd'T'HH:mm:ss} %p %C{1.} [%t] %m%n" + +appdynamics-otel-collector: + clientId: + clientSecret: + endpoint: diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/subparser-config.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/subparser-config.yaml new file mode 100644 index 00000000..761e4734 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/subparser-config.yaml @@ -0,0 +1,38 @@ +global: + clusterName: "ClusterName" +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: true +logCollectorConfig: + container: + conditionalConfigs: + - condition: + 
or: + - equals: + kubernetes.container.name: log-gen-app-grok-sub-win + - equals: + kubernetes.container.name: log-gen-app-grok-sub + config: + multiLinePattern: '^\d{4}-\d{2}-\d{2}' + multiLineNegate: true + multiLineMatch: "after" + messageParser: + grok: + enabled: true + patterns: + - '\[%{GREEDYDATA:log4j}\] \[%{GREEDYDATA:json}\] \[%{GREEDYDATA:log4j2}\] \[%{GREEDYDATA:logback}\] \[%{IPORHOST:grok}\] \[%{GREEDYDATA:infra}\]' + timestampPattern: "yyyy-MM-dd HH:mm:ss,SSS" + subparsers: "{\\\"parsersList\\\": [{ \\\"_message_parser.type\\\": \\\"log4j\\\", \\\"_message_parser.field\\\": \\\"log4j\\\", \\\"_message_parser.pattern\\\": \\\"%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %msg%n\\\"}, + { \\\"_message_parser.type\\\": \\\"log4j\\\", \\\"_message_parser.field\\\": \\\"log4j2\\\", \\\"_message_parser.pattern\\\": \\\"%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %msg%n\\\"}, + { \\\"_message_parser.type\\\": \\\"logback\\\", \\\"_message_parser.field\\\": \\\"logback\\\", \\\"_message_parser.pattern\\\": \\\"%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %msg%n\\\"}, + { \\\"_message_parser.type\\\": \\\"grok\\\", \\\"_message_parser.field\\\": \\\"grok\\\", \\\"_message_parser.pattern\\\": \\\"%{GREEDYDATA:infra}\\\"}, + { \\\"_message_parser.type\\\": \\\"infra\\\", \\\"_message_parser.field\\\": \\\"infra\\\"}, + { \\\"_message_parser.type\\\": \\\"json\\\", \\\"_message_parser.field\\\": \\\"json\\\", \\\"_message_parser.flatten_sep\\\": \\\"/\\\"}]\\r\\n}" + +appdynamics-otel-collector: + clientId: + clientSecret: + endpoint: + tokenUrl: \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/troubleshooting.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/troubleshooting.yaml new file mode 100644 index 00000000..37505252 --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/lca-sample-configs/troubleshooting.yaml @@ -0,0 +1,63 @@ +global: + clusterName: "ClusterName" +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: true + logCollectorConfig: + container: + conditionalConfigs: + - condition: + operator: equals + key: kubernetes.container.name + value: log-gen-app-log4j + config: + multiLinePattern: '^\d{4}-\d{2}-\d{2}' + multiLineMatch: "after" + messageParser: + log4J: + enabled: true + pattern: "%d{yyyy-MM-dd'T'HH:mm:ss} %p %C{1.} [%t] %m%n" + dropFields: ["agent", "stream", "ecs", "input", "orchestrator", "k8s.annotations.appdynamics", "k8s.labels", "k8s.node.labels", "cloud"] + batchSize: 1000 # this is the default value + maxBytes: 1000000 # this is the default value + logging: + level: info + files: + # to enable logging to files + enabled: false + # number of files to keep if logging to files is enabled + keepFiles: 5 # default value + metrics: + # to enable logging metrics data + enabled: false + period: 30s # default value + # you don't need below block if you are not using/exporting metrics + monitoring: + otlpmetric: + enabled: false + metrics: + # default metrics to capture are below + - beat.memstats.memory_alloc + - filebeat.events.active + - filebeat.harvester.running + - filebeat.harvester.skipped + - filebeat.input.log.files.truncated + - libbeat.output.read.errors + - libbeat.output.write.bytes + - libbeat.output.write.errors + - system.load.norm.5 + - system.load.norm.15 + retry: + enabled: false + ssl: + enabled: true + certificateAuthorities: ["C:/filebeat/certs/ca/ca.pem"] + certificate: "C:/filebeat/certs/client/client.pem" + key: "C:/filebeat/certs/client/client-key.pem" +appdynamics-otel-collector: + clientId: + clientSecret: + endpoint: + tokenUrl: diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/logging.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/logging.yaml new file mode 100644 index 00000000..2799d58c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/logging.yaml @@ -0,0 +1,13 @@ +global: + clusterName: "cluster-name" + +appdynamics-otel-collector: + clientId: "id" # clientId for oauth2 extension + clientSecret: "oauth-client-secret" + tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension + endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporter + configOverride: + service: + pipelines: + metrics: + exporters: [ otlphttp, logging ] \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/mtls.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/mtls.yaml new file mode 100644 index 00000000..fa5408f4 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/mtls.yaml @@ -0,0 +1,36 @@ +global: + clusterName: "cluster-name" + tls: + appdCollectors: + enabled: true + secret: + secretName: client-secret + secretKeys: + caCert: ca.crt + tlsCert: tls.crt + tlsKey: tls.key + otelReceiver: + secret: + secretName: server-secret + secretKeys: + caCert: ca.crt + tlsCert: tls.crt + tlsKey: tls.key + settings: + min_version: 1.2 + max_version: 1.3 + mtlsEnabled: true # receiver will use server CA file for mTLS to auth the client, i.e. 
set client_ca_file with ca.crt + + + + +appdynamics-otel-collector: + clientId: "id" # clientId for oauth2 extension + clientSecret: "oauth-client-secret" + tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension + endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporter + configOverride: + service: + pipelines: + metrics: + exporters: [ otlphttp, logging ] \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/security-only.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/security-only.yaml new file mode 100644 index 00000000..54b3e099 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/security-only.yaml @@ -0,0 +1,18 @@ +appdynamics-otel-collector: + install: false + +appdynamics-cloud-k8s-monitoring: + install: + clustermon: false + defaultInfraCollectors: false + +appdynamics-otel-instrumentation: + enabled: false + +appdynamics-security-collector: + enabled: true + panoptica: + controller: + agentID: "xxxx" + secret: + sharedSecret: "xxxx" diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/simple-windows.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/simple-windows.yaml new file mode 100644 index 00000000..7bc0fe61 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/simple-windows.yaml @@ -0,0 +1,10 @@ +global: + clusterName: "cluster-name" + +appdynamics-otel-collector: + clientId: "id" # clientId for oauth2 extension + clientSecret: "oauth-client-secret" + tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension + endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporter + runOn: + windows: true \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/simple.yaml 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/simple.yaml new file mode 100644 index 00000000..90551532 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/simple.yaml @@ -0,0 +1,8 @@ +global: + clusterName: "cluster-name" + +appdynamics-otel-collector: + clientId: "id" # clientId for oauth2 extension + clientSecret: "oauth-client-secret" + tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension + endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporter \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/tls.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/tls.yaml new file mode 100644 index 00000000..21459de2 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/tls.yaml @@ -0,0 +1,34 @@ +global: + clusterName: "cluster-name" + tls: + appdCollectors: + enabled: true + secret: + secretName: client-secret + secretKeys: + caCert: ca.crt + tlsCert: tls.crt + tlsKey: tls.key + otelReceiver: + secret: + secretName: server-secret + secretKeys: + caCert: ca.crt + tlsCert: tls.crt + tlsKey: tls.key + settings: + min_version: 1.2 + max_version: 1.3 + + + +appdynamics-otel-collector: + clientId: "id" # clientId for oauth2 extension + clientSecret: "oauth-client-secret" + tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension + endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporter + configOverride: + service: + pipelines: + metrics: + exporters: [ otlphttp, logging ] \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/tls_exporter.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/tls_exporter.yaml new file mode 100644 index 00000000..c6703c1a --- /dev/null +++ 
b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/examples/tls_exporter.yaml @@ -0,0 +1,20 @@ +global: + clusterName: "cluster-name" + tls: + otelExporter: + secret: + secretName: client-secret + secretKeys: + tlsCert: tls.crt + tlsKey: tls.key + +appdynamics-otel-collector: + clientId: "id" # clientId for oauth2 extension + clientSecret: "oauth-client-secret" + tokenUrl: "https://token_ur.com/oauth2l" # tokenUrl for oauth2 extension + endpoint: "https://data.appdynamics.com" # endpoint for otlphttp exporter + configOverride: + service: + pipelines: + metrics: + exporters: [ otlphttp, logging ] \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/NOTES.txt b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/NOTES.txt new file mode 100644 index 00000000..1df78adf --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/NOTES.txt @@ -0,0 +1,63 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +The chart installs the following components + +1) AppDynamics OpenTelemetry Collector + +2) AppDynamics Cloud Infrastructure Collector + Enabled: {{ index .Values "appdynamics-cloud-k8s-monitoring" "install" "defaultInfraCollectors" }} + Description: Installs the Server Collector and Container Collector to monitor the host and container metrics + +3) AppDynamics Cloud Cluster Collector + Enabled: {{ index .Values "appdynamics-cloud-k8s-monitoring" "install" "clustermon" }} + Description: Installs the Cluster Collector to monitor the kubernetes metrics and events + +4) AppDynamics Cloud Log Collector + Enabled: {{ index .Values "appdynamics-cloud-k8s-monitoring" "install" "logCollector" }} + Description: Installs the Log Collector to collect the logs from applications running in kubernetes cluster + +5) 
AppDynamics Cloud Database Collector + Enabled: {{ index .Values "appdynamics-cloud-db-collector" "install" "dbCollector" }} + Description: Installs the DB Collector to collect metrics and monitors the Databases specified in DbConfigs + +6) Cisco Cloud Observability Network Collector + Enabled: {{ index .Values "appdynamics-network-monitoring" "enabled" }} + Description: Installs the Network Collector, to monitor network performance for applications and infrastructure +7) Appdynamics cSaaS Auto Instrumentation Agent + Enabled: {{index .Values "appdynamics-auto-instrumentation-agent" "enabled"}} + Description: Installs the k8s auto instrumentation agent to instrument cSaaS entities + +8) Appdynamics cSaaS Cluster Agent + Enabled: {{ index .Values "appdynamics-csaas-k8s-cluster-agent" "installClusterAgent" }} + Description: Installs the k8s cluster agent for k8s monitoring in cSaaS controllers + +9) Appdynamics cSaaS InfraViz Agent + Enabled: {{ index .Values "appdynamics-csaas-k8s-cluster-agent" "installInfraViz" }} + Description: Installs the infraViz Agent for Infra monitoring in cSaaS controllers + +THIRD PARTY LICENSE DISCLOSURE +=============================== + +AppDynamics OpenTelemetry Collector +-------------------------------------------------- +https://www.cisco.com/c/dam/en_us/about/doing_business/open_source/docs/AppDynamics_Distribution_for_OpenTelemetry_Collector-2470-1721941458.pdf + +AppDynamics Cloud Cluster Collector +-------------------------------------------------- +https://www.cisco.com/c/dam/en_us/about/doing_business/open_source/docs/AppDynamics_Cloud_Clustermon-2470-1721901181.pdf + +AppDynamics Cloud Infrastructure Collector +-------------------------------------------------- +https://www.cisco.com/c/dam/en_us/about/doing_business/open_source/docs/AppDynamics_Cloud_Clustermon-2470-1721901181.pdf + +AppDynamics Cloud Log Collector +----------------------------
+https://www.cisco.com/c/dam/en_us/about/doing_business/open_source/docs/Appdynamics_Beats_Levitate-2440-1713840358.pdf + +AppDynamics Database Collector +---------------------------- +https://www.cisco.com/c/dam/en_us/about/doing_business/open_source/docs/Appdynamics_DB_Collector_Agent-242-1708958844.pdf \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/_helpers.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/_helpers.tpl new file mode 100644 index 00000000..231e8d96 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/_helpers.tpl @@ -0,0 +1,60 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "appdynamics-collectors.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "appdynamics-collectors.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "appdynamics-collectors.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{/* +Selector labels +*/}} +{{- define "appdynamics-collectors.selectorLabels" -}} +app.kubernetes.io/name: {{ include "appdynamics-collectors.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "appdynamics-collectors.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "appdynamics-collectors.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Service name of otel collector +*/}} +{{- define "appdynamics-collectors.otelCollectorService" -}} +{{- default "appdynamics-otel-collector-service" (index .Values "appdynamics-otel-collector" "service" "name" ) }} +{{- end }} + diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/common/_common.tpl b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/common/_common.tpl new file mode 100644 index 00000000..ffa6bcb3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/common/_common.tpl @@ -0,0 +1,11 @@ +{{/* +Common labels +*/}} +{{- define "appdynamics-collectors.labels" -}} +helm.sh/chart: {{ include "appdynamics-collectors.chart" . }} +{{ include "appdynamics-collectors.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/common/config-validator.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/common/config-validator.yaml new file mode 100644 index 00000000..ce5a592e --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/templates/common/config-validator.yaml @@ -0,0 +1,11 @@ +{{- $autoInstrumentationAgentEnabled := index .Values "appdynamics-auto-instrumentation-agent" "enabled" }} +{{- $clusterAgentEnabled := index .Values "appdynamics-csaas-k8s-cluster-agent" "installClusterAgent" }} +{{- $clusterCollectorEnabled := index .Values "appdynamics-cloud-k8s-monitoring" "install" "clustermon" }} + +{{if and $autoInstrumentationAgentEnabled $clusterAgentEnabled -}} +{{ fail "Configuration Conflict - The settings 'appdynamics-auto-instrumentation-agent.enabled' and 'appdynamics-csaas-k8s-cluster-agent.installClusterAgent' must not be activated at the same time. Please choose only one option to enable automatic application performance monitoring (APM) agent instrumentation within the Kubernetes cluster." -}} +{{ end -}} + +{{if and $clusterCollectorEnabled $clusterAgentEnabled -}} +{{ fail "Overlapping Configuration - The options 'appdynamics-csaas-k8s-cluster-agent.installClusterAgent' and 'appdynamics-cloud-k8s-monitoring.install.clustermon' should not both be active at the same time. Please select only one of these for Kubernetes cluster monitoring across the distinct product lines csaas or cco." 
-}} +{{ end -}} \ No newline at end of file diff --git a/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/values.yaml b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/values.yaml new file mode 100644 index 00000000..c159c60b --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/charts/appdynamics-collectors/values.yaml @@ -0,0 +1,97 @@ +global: + smartAgentInstall: false + namespace: "" + clusterName: "" + tls: + appdCollectors: + enabled: false + secret: {} + otelReceiver: + secret: {} + settings: {} + otelExporter: + secret: {} + settings: {} + helmChartVersion: 1.22.1287 +appdynamics-cloud-db-collector: + appdCloudAuth: {} + install: + dbCollector: false + dbMonitoringConfigs: false + agentManagementEnabled: true + dbCollectorPod: + image: appdynamics/appdynamics-cloud-db-collector:24.2.0-1084 + imagePullPolicy: Always + imagePullSecrets: [] + dbCollectorConfig: + pprof: + enabled : false + port : 0 + +appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: false + + clustermonConfig: + os: linux + filters: + entity: + excludeRegex: "kube-root-ca.crt|openshift-service-ca.crt" + + servermonConfig: + os: [linux] + + containermonConfig: + os: [linux] + + clustermonPod: + image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + imagePullPolicy: Always + imagePullSecrets: [] + + inframonPod: + image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + imagePullPolicy: Always + imagePullSecrets: [] + + windowsExporterPod: + image: ghcr.io/prometheus-community/windows-exporter:0.24.0 + imagePullPolicy: Always + imagePullSecrets: [] + nodeSelector: + kubernetes.io/os: windows + + logCollectorPod: + image: appdynamics/appdynamics-cloud-log-collector-agent:24.4.0-1163 + imagePullPolicy: Always + imagePullSecrets: [] + +appdynamics-csaas-k8s-cluster-agent: + installClusterAgent: false + installInfraViz: false + +appdynamics-otel-collector: + install: true + 
spec: + image: appdynamics/appdynamics-cloud-otel-collector:24.7.0-1639 + mode: daemonset + imagePullPolicy: Always + +appdynamics-security-collector: + enabled: false + panoptica: + controller: + agentID: "" + secret: + sharedSecret: "" + +appdynamics-otel-instrumentation: + enabled: true + +appdynamics-network-monitoring: + enabled: false + +appdynamics-auto-instrumentation-agent: + enabled: false diff --git a/packs/appdynamics-collectors-1.22.1287/logo.png b/packs/appdynamics-collectors-1.22.1287/logo.png new file mode 100644 index 00000000..1d1df310 Binary files /dev/null and b/packs/appdynamics-collectors-1.22.1287/logo.png differ diff --git a/packs/appdynamics-collectors-1.22.1287/pack.json b/packs/appdynamics-collectors-1.22.1287/pack.json new file mode 100644 index 00000000..0b5f70d3 --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/pack.json @@ -0,0 +1,29 @@ +{ + "addonType": "monitoring", + "annotations": { + "ingressSupported": "false", + "source": "community", + "contributor": "appdynamics" + }, + "cloudTypes": [ + "all" + ], + "charts": [ + "charts/appdynamics-collectors-1.22.1287.tgz" + ], + "displayName": "Cisco AppDynamics Collectors", + "layer": "addon", + "name": "cisco-appdynamics-collectors", + "version": "1.22.1287", + "constraints": { + "dependencies": [ + { + "layer": "addon", + "packName": "cisco-appdynamics-operators", + "minVersion": "1.22.0", + "maxVersion": "", + "type": "required" + } + ] + } +} diff --git a/packs/appdynamics-collectors-1.22.1287/values.yaml b/packs/appdynamics-collectors-1.22.1287/values.yaml new file mode 100644 index 00000000..db64142c --- /dev/null +++ b/packs/appdynamics-collectors-1.22.1287/values.yaml @@ -0,0 +1,100 @@ +pack: + namespace: "appdynamics" + spectrocloud.com/install-priority: "1" + releaseNameOverride: + appdynamics-collectors: collector + content: + images: + - image: appdynamics/appdynamics-cloud-db-collector:24.2.0-1084 + - image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + 
- image: ghcr.io/prometheus-community/windows-exporter:0.24.0 + - image: appdynamics/appdynamics-cloud-log-collector-agent:24.4.0-1163 + - image: appdynamics/appdynamics-cloud-otel-collector:24.7.0-1639 + charts: + - repo: https://appdynamics.jfrog.io/artifactory/appdynamics-cloud-helmcharts/ + name: appdynamics-collectors + version: 1.22.1287 +charts: + appdynamics-collectors: + global: + clusterName: + oauth: + clientId: + clientSecret: + endpoint: + tokenUrl: + tls: + appdCollectors: + enabled: false + secret: {} + otelReceiver: + secret: {} + settings: {} + otelExporter: + secret: {} + settings: {} + helmChartVersion: 1.22.1287 + appdynamics-cloud-db-collector: + appdCloudAuth: {} + install: + dbCollector: false + dbMonitoringConfigs: false + dbCollectorPod: + image: appdynamics/appdynamics-cloud-db-collector:24.2.0-1084 + imagePullPolicy: Always + imagePullSecrets: [] + dbCollectorConfig: + pprof: + enabled: false + port: 0 + appdynamics-cloud-k8s-monitoring: + install: + clustermon: true + defaultInfraCollectors: true + logCollector: false + clustermonConfig: + os: linux + filters: + entity: + excludeRegex: "kube-root-ca.crt|openshift-service-ca.crt" + servermonConfig: + os: [linux] + containermonConfig: + os: [linux] + clustermonPod: + image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + imagePullPolicy: Always + imagePullSecrets: [] + inframonPod: + image: appdynamics/appdynamics-cloud-k8s-monitoring:24.7.0-2057 + imagePullPolicy: Always + imagePullSecrets: [] + windowsExporterPod: + image: ghcr.io/prometheus-community/windows-exporter:0.24.0 + imagePullPolicy: Always + imagePullSecrets: [] + nodeSelector: + kubernetes.io/os: windows + logCollectorPod: + image: appdynamics/appdynamics-cloud-log-collector-agent:24.4.0-1163 + imagePullPolicy: Always + imagePullSecrets: [] + appdynamics-otel-collector: + clientId: + clientSecret: + tokenUrl: + endpoint: + install: true + spec: + image: appdynamics/appdynamics-cloud-otel-collector:24.7.0-1639 + 
mode: daemonset + imagePullPolicy: Always + appdynamics-security-collector: + enabled: false + panoptica: + controller: + agentID: "" + secret: + sharedSecret: "" + appdynamics-otel-instrumentation: + enabled: true