From d79afc68d37924ee11a93046959216312e9efd00 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 30 Apr 2024 15:27:41 +0100 Subject: [PATCH] HPCC-31661 Add options to generate cpu resources as limits Signed-off-by: Gavin Halliday --- helm/hpcc/templates/_helpers.tpl | 58 +- helm/hpcc/templates/dafilesrv.yaml | 5 +- helm/hpcc/templates/dali.yaml | 5 +- helm/hpcc/templates/dfuserver.yaml | 5 +- helm/hpcc/templates/eclagent.yaml | 16 +- helm/hpcc/templates/eclccserver.yaml | 18 +- helm/hpcc/templates/eclscheduler.yaml | 5 +- helm/hpcc/templates/esp.yaml | 5 +- helm/hpcc/templates/localroxie.yaml | 5 +- helm/hpcc/templates/roxie.yaml | 15 +- helm/hpcc/templates/thor.yaml | 31 +- helm/hpcc/values.schema.json | 8 + testing/helm/tests/resourced2.yaml | 869 ++++++++++++++++++++++++++ 13 files changed, 940 insertions(+), 105 deletions(-) create mode 100644 testing/helm/tests/resourced2.yaml diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 73f2f35178b..97ca1aa35e6 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1316,18 +1316,38 @@ Pass in a dictionary with me defined */}} {{- define "hpcc.addResources" }} {{- $resources := .me | default .defaults }} -{{- if $resources }} - {{- $limits := omit $resources "cpu" }} - {{- $requests := pick $resources "cpu" }} +{{- $omitResources := .root.Values.global.omitResources | default false }} +{{- $resourceCpusWithLimits := hasKey .root.Values.global "resourceCpusWithLimits" | ternary .root.Values.global.resourceCpusWithLimits false -}} +{{- $resourceWholeCpusWithLimits := hasKey .root.Values.global "resourceWholeCpusWithLimits" | ternary .root.Values.global.resourceWholeCpusWithLimits false -}} +{{- if not $omitResources }} +{{- if $resources }} +{{- $limits := omit $resources "cpu" }} +{{- $requests := dict }} +{{- if hasKey $resources "cpu" -}} +{{- $cpu := $resources.cpu }} +{{- if $resourceCpusWithLimits -}} +{{- $_ := set $limits "cpu" $cpu -}} +{{- else if $resourceWholeCpusWithLimits -}} +{{- $milliCPUs := int (include "hpcc.k8sCPUStringToMilliCPU" $cpu) }} +{{- if eq (mod $milliCPUs 1000) 0 -}} +{{- $_ := set $limits "cpu" $cpu -}} +{{- else -}} +{{- $_ := set $requests "cpu" $cpu -}} +{{- end -}} +{{- else -}} +{{- $_ := set $requests "cpu" $cpu -}} +{{- end -}} +{{- end }} resources: - {{- if $limits }} +{{- if $limits }} limits: - {{- toYaml $limits | nindent 4 }} - {{- end -}} - {{- if $requests }} +{{- toYaml $limits | nindent 4 }} +{{- end -}} +{{- if $requests }} requests: - {{- toYaml $requests | nindent 4 -}} - {{- end -}} +{{- toYaml $requests | nindent 4 -}} +{{- end -}} +{{- end -}} {{- end -}} {{- end -}} @@ -1336,16 +1356,11 @@ Add resources object for stub pods Pass in dict with root, me and instances defined */}} {{- define "hpcc.addStubResources" -}} -{{- $stubInstanceResources := .stubResources | default .root.Values.global.stubInstanceResources | default dict }} -{{- $milliCPUText := $stubInstanceResources.cpu | default "200m" }} -{{- $milliCPUs := int (include "hpcc.k8sCPUStringToMilliCPU" $milliCPUText) }} -{{- $memoryText := $stubInstanceResources.memory | default "400Mi" }} -{{- $memory := int64 (include "hpcc.k8sMemoryStringToBytes" $memoryText) }} -resources: - limits: - memory: {{ include "hpcc.bytesToK8sMemoryString" $memory | quote }} - requests: - cpu: {{ printf "%dm" $milliCPUs | quote }} +{{- $stubInstanceResources := .me | default .root.Values.global.stubInstanceResources | default dict }} +{{- $cpuResource := $stubInstanceResources.cpu | 
default "200m" }} +{{- $memoryResource := $stubInstanceResources.memory | default "400Mi" }} +{{- $resources := dict "memory" $memoryResource "cpu" $cpuResource -}} +{{- include "hpcc.addResources" (dict "me" $resources "root" .root) }} {{- end -}} {{/* @@ -1404,10 +1419,7 @@ Pass in dict with root, me and dali if container in dali pod "--service={{ .me.name }}", {{ include "hpcc.daliArg" (dict "root" .root "component" "Sasha" "optional" false "overrideDaliHost" $overrideDaliHost "overrideDaliPort" $overrideDaliPort) | indent 10 }} ] -{{- $omitResources := .root.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .me.resources) | indent 2 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .me.resources "root" .root) | indent 2 }} {{- include "hpcc.addSecurityContext" . | indent 2 }} env: {{ include "hpcc.mergeEnvironments" $env | indent 2 -}} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 1d451b53bf5..491e7e537b7 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -74,10 +74,7 @@ spec: value: "/tmp/{{ .name }}.sentinel" {{ include "hpcc.addSentinelProbes" . | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .resources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 6f701b77fdb..089b976d1aa 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -116,10 +116,7 @@ spec: value: "/tmp/{{ $dali.name }}.sentinel" {{ include "hpcc.addSentinelProbes" $dali | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" $dali.resources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" $dali.resources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" $dali | indent 8 }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index ce1c3dacb3e..25a1ee5ed97 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -84,10 +84,7 @@ spec: value: "/tmp/{{ .name }}.sentinel" {{ include "hpcc.addSentinelProbes" . | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .resources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{- include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index e56f9324fe6..5005687806c 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -82,10 +82,7 @@ data: - name: {{ $appJobName }} {{- include "hpcc.addSecurityContext" . 
| indent 12 }} {{ include "hpcc.addImageAttrs" . | indent 12 }} -{{- $omitResources := .root.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .me.resources) | indent 12 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .me.resources "root" .root) | indent 12 }} {{- $appCmd := printf "%s %s %s _HPCC_ARGS_" $apptype (include "hpcc.configArg" .me) (include "hpcc.daliArg" (dict "root" .root "component" "ECL Agent" "optional" false )) }} {{ include "hpcc.addCommandAndLifecycle" (. | merge (dict "command" $appCmd)) | indent 12 }} env: @@ -167,13 +164,10 @@ spec: value: "/tmp/{{ .name }}.sentinel" {{ include "hpcc.addSentinelProbes" . | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- if .useChildProcesses }} -{{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} -{{- else }} -{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }} -{{- end }} +{{- if .useChildProcesses }} +{{- include "hpcc.addResources" (dict "me" .resources "root" $) | indent 8 }} +{{- else }} +{{- include "hpcc.addStubResources" (dict "me" .stubResources "root" $) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 1b1d3301ddd..78b1bd91ffa 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -77,10 +77,7 @@ data: {{- end }} - name: {{ $compileJobName }} {{- include "hpcc.addSecurityContext" . | indent 12 }} -{{- $omitResources := .root.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .me.resources) | indent 12 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .me.resources "root" .root) | indent 12 }} {{ include "hpcc.addImageAttrs" . | indent 12 }} {{- $misc := .root.Values.global.misc | default dict -}} {{- $postJobCommand := $misc.postJobCommand | default "" }} @@ -175,14 +172,11 @@ spec: value: "/tmp/{{ .name }}.sentinel" {{ include "hpcc.addSentinelProbes" . | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- if .useChildProcesses }} -{{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} -{{- else }} -{{- $defaultResources := dict "cpu" "1" "memory" "1Gi" }} -{{- include "hpcc.addResources" (dict "me" .timedChildResources "defaults" $defaultResources) | indent 8 }} -{{- end }} +{{- if .useChildProcesses }} +{{- include "hpcc.addResources" (dict "me" .resources "root" $) | indent 8 }} +{{- else }} +{{- $defaultResources := dict "cpu" "1" "memory" "1Gi" }} +{{- include "hpcc.addResources" (dict "me" .timedChildResources "defaults" $defaultResources "root" $) | indent 8 }} {{- end }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 3d6e28a17e8..b6fb8310479 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -93,11 +93,8 @@ spec: value: "/tmp/{{ .name }}.sentinel" {{ include "hpcc.addSentinelProbes" . 
| indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} {{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} -{{- include "hpcc.addResources" (dict "me" .resources "defaults" $defaultResources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .resources "defaults" $defaultResources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 3390363f884..d5537ee7a1c 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -155,10 +155,7 @@ spec: value: "/tmp/{{ .name }}.sentinel" {{ include "hpcc.addSentinelProbes" . | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .resources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .resources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index fd804eaf6f4..a36c9f13231 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -114,10 +114,7 @@ spec: {{- end }} {{- end }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" $roxie.resources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" $roxie.resources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index b3ee4780a04..5fce70b5aec 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -146,11 +146,8 @@ spec: - name: {{ $commonCtx.toponame | quote }} {{ include "hpcc.addSentinelProbes" $toposerver | indent 8 }} {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} {{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} -{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} workingDir: /var/lib/HPCCSystems command: [ {{ include "hpcc.componentCommand" (dict "me" $toposerver "root" $ "process" "toposerver") }} ] @@ -300,10 +297,7 @@ spec: command: ["testsocket", ".", "control:closedown"] {{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} {{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) 
| indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" ($roxie.serverResources | default $roxie.resources)) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" ($roxie.serverResources | default $roxie.resources) "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} @@ -411,10 +405,7 @@ spec: {{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} {{- end }} {{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) | indent 8 }} -{{- $omitResources := $.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" ($roxie.channelResources | default $roxie.resources)) | indent 8 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" ($roxie.channelResources | default $roxie.resources) "root" $) | indent 8 }} {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} volumeMounts: {{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index cf75466ffbf..c044dd5dbd9 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -109,10 +109,7 @@ data: - name: {{ $eclAgentJobName }} {{- include "hpcc.addSecurityContext" . | indent 12 }} {{ include "hpcc.addImageAttrs" . | indent 12 }} -{{- $omitResources := .root.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" .eclAgentResources) | indent 12 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" .eclAgentResources "root" .root) | indent 12 }} {{- $agentCmd := printf "%s %s %s _HPCC_ARGS_" $eclAgentType (include "hpcc.configArg" .me) (include "hpcc.daliArg" (dict "root" .root "component" "Thor" "optional" false)) }} {{ include "hpcc.addCommandAndLifecycle" (. | merge (dict "command" $agentCmd)) | indent 12 }} env: @@ -179,10 +176,7 @@ data: - name: {{ $thorManagerJobName }} {{- include "hpcc.addSecurityContext" . | indent 12 }} {{ include "hpcc.addImageAttrs" . | indent 12 }} -{{- $omitResources := .root.Values.global.omitResources | default false }} -{{- if not $omitResources }} -{{- include "hpcc.addResources" (dict "me" $thorScope.managerResources) | indent 12 }} -{{- end }} +{{- include "hpcc.addResources" (dict "me" $thorScope.managerResources "root" .root) | indent 12 }} {{- $thorManagerCmd := printf "thormaster_lcr %s %s _HPCC_ARGS_" (include "hpcc.configArg" .me) (include "hpcc.daliArg" (dict "root" .root "component" "Thor" "optional" false)) }} {{ include "hpcc.addCommandAndLifecycle" (. 
| merge (dict "command" $thorManagerCmd)) | indent 12 }}
             env:
@@ -251,10 +245,7 @@ data:
         - name: {{ $thorWorkerJobName }}-{{ $containerNum }}
 {{- include "hpcc.addSecurityContext" $configCtx | indent 12 }}
 {{ include "hpcc.addImageAttrs" $configCtx | indent 12 }}
-{{- $omitResources := $configCtx.root.Values.global.omitResources | default false }}
-{{- if not $omitResources }}
-{{- include "hpcc.addResources" (dict "me" $thorScope.workerResources) | indent 12 }}
-{{- end }}
+{{- include "hpcc.addResources" (dict "me" $thorScope.workerResources "root" $configCtx.root) | indent 12 }}
 {{- $thorWorkerCmd := printf "thorslave_lcr %s %s _HPCC_ARGS_ --slaveport=%d" (include "hpcc.configArg" $configCtx.me) (include "hpcc.daliArg" (dict "root" $configCtx.root "component" "Thor" "optional" false)) $slavePort }}
 {{ include "hpcc.addCommandAndLifecycle" ($configCtx | merge (dict "command" $thorWorkerCmd)) | indent 12 }}
             env:
@@ -390,13 +381,10 @@ spec:
         - name: "SENTINEL"
           value: "/tmp/{{ $commonCtx.eclAgentName }}.sentinel"
 {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }}
-{{- $omitResources := $.Values.global.omitResources | default false }}
-{{- if not $omitResources }}
-{{- if $commonCtx.eclAgentUseChildProcesses }}
-{{- include "hpcc.addResources" (dict "me" .eclAgentResources) | indent 8 }}
-{{- else }}
-{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }}
-{{- end }}
+{{- if $commonCtx.eclAgentUseChildProcesses }}
+{{- include "hpcc.addResources" (dict "me" .eclAgentResources "root" $) | indent 8 }}
+{{- else }}
+{{- include "hpcc.addStubResources" (dict "me" .stubResources "root" $) | indent 8 }}
 {{- end }}
 {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }}
         volumeMounts:
@@ -456,10 +444,7 @@ spec:
         - name: "SENTINEL"
           value: "/tmp/{{ $commonCtx.thorAgentName }}.sentinel"
 {{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }}
-{{- $omitResources := $.Values.global.omitResources | default false }}
-{{- if not $omitResources }}
-{{- include "hpcc.addStubResources" ($commonCtx | merge (dict "stubResources" .stubResources)) | indent 8 }}
-{{- end }}
+{{- include "hpcc.addStubResources" (dict "me" .stubResources "root" $ ) | indent 8 }}
 {{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }}
         volumeMounts:
 {{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }}
diff --git a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json
index 4de707b13a0..ec474465ee4 100644
--- a/helm/hpcc/values.schema.json
+++ b/helm/hpcc/values.schema.json
@@ -273,6 +273,14 @@
         "description": "if set, no resource definitions are generated from the helm charts",
         "type": "boolean"
       },
+      "resourceCpusWithLimits": {
+        "description": "if set, cpu resources are provided as limits rather than requests. Not recommended, because this can lead to painful throttling on roxie.",
+        "type": "boolean"
+      },
+      "resourceWholeCpusWithLimits": {
+        "description": "if set, whole cpu resources are provided as limits rather than requests. On K8s systems where cpuManagerPolicy=static this allows binding using affinities.",
+        "type": "boolean"
+      },
       "privileged": {
         "type": "boolean"
       },
diff --git a/testing/helm/tests/resourced2.yaml b/testing/helm/tests/resourced2.yaml
new file mode 100644
index 00000000000..6b4f95d615f
--- /dev/null
+++ b/testing/helm/tests/resourced2.yaml
@@ -0,0 +1,869 @@
+# Default values for hpcc.
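+#
+# This test fixture exercises the new cpu limit options: resourceCpusWithLimits
+# is left false and resourceWholeCpusWithLimits is enabled, so (assuming the
+# hpcc.addResources helper in this patch) whole-cpu values such as dali's
+# cpu: "2" should be emitted under resources.limits, fractional values such as
+# eclscheduler's cpu: "567m" should stay under resources.requests, and memory
+# is always emitted as a limit.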
+ +global: + # Settings in the global section apply to all HPCC components in all subcharts + resourceCpusWithLimits: false + resourceWholeCpusWithLimits: true + + image: + ## It is recommended to name a specific version rather than latest, for any non-trivial deployment + ## For best results, the helm chart version and platform version should match, which is the default if version is + ## not specified. Do not override without good reason as undefined behavior may result. + ## version: x.y.z + root: "hpccsystems" # change this if you want to pull your images from somewhere other than DockerHub hpccsystems + pullPolicy: IfNotPresent + ## If you need to provide credentials to pull your image, they should be added as a k8s secret, and the secret name provided here + # imagePullSecrets: xxx + + ## busybox image is used for some initialization/termination tasks - you can override the location here + #busybox: "myrepo/busybox:stable" + + ## It is possible (but not recommended) to change the uid/gid that the HPCC containers run under + ## user: + ## uid: 10000 + ## gid: 10001 + + # logging sets the default logging information for all components. Can be overridden locally + logging: + detail: 80 + + # tracing sets the default tracing information for all components. Can be overridden locally + tracing: + disabled: false + alwaysCreateTraceIds: true + + ## resource settings for stub components + stubInstanceResources: + memory: "200Mi" + cpu: "1000m" + + ## env adds default environment variables for all components. Environment settings can also be added or overridden locally + #env: + #- name: SMTPserver + # value: mysmtpserver + + # Specify a defaultEsp to control which eclservices service is returned from Std.File.GetEspURL, and other uses + # If not specified, the first esp component that exposes eclservices application is assumed. 
+ # Can also be overridden locally in individual components + ## defaultEsp: eclservices + + egress: + ## If restricted is set, NetworkPolicies will include egress restrictions to allow connections from pods only to the minimum required by the system + ## Set to false to disable all egress policy restrictions (not recommended) + restricted: true + + ## The kube-system namespace is not generally labelled by default - to enable more restrictive egress control for dns lookups we need to be told the label + ## If not provided, DNS lookups on port 53 will be allowed to connect anywhere + ## The namespace may be labelled using a command such as "kubectl label namespace kube-system name=kube-system" + # kubeSystemLabel: "kube-system" + + ## To properly allow access to the kubectl API from pods that need it, the cidr of the kubectl endpoint needs to be supplied + ## This may be obtained via "kubectl get endpoints --namespace default kubernetes" + ## If these are not supplied, egress controls will allow access to any IPs/ports from any pod where API access is needed + # kubeApiCidr: 172.17.0.3/32 + # kubeApiPort: 7443 + + ## named egress sections defined here, can be referenced by components, or they can define their own egress section explicitly + #engineEgress: + #- to: + # - ipBlock: + # cidr: 10.9.8.7/32 + # ports: + # - protocol: TCP + # port: 443 + + + cost: + currencyCode: USD + # The following are example pricing based on standard Azure pricing and should be updated to reflect actual rates + perCpu: 0.0565000000001 # D64ds_v4 compute node ($2,639.68/month for 64 vCPU) + storageAtRest: 0.0208000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + storageReads: 0.00400000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + storageWrites: 0.0500000000001 # Blob storage pricing (East US/Flag NS/LRS redundancy/Hot) + + # postJobCommand will execute at the end of a dynamically launched K8s job, + # when the main entrypoint process finishes, or if the readiness probes trigger a preStop event. + # This can be useful if injected sidecars are installed that need to be told to stop. + # If they are not stopped, the pod continues running with the side car container only, in a "NotReady" state. + # An example of this is the Istio envoy side car. It can be stopped with the command below. + # Set postJobCommandViaSidecar to true, if the command needs to run with privilege, this will enable the command + # to run as root in a sidecar in same process space as other containers, allowing it to for example send signals + # to processes in sidecars + # misc: + # postJobCommand: "curl -sf -XPOST http://127.0.0.1:15020/quitquitquit" + # Or example for linkerd + # postJobCommand: "kill $(pgrep linkerd2-proxy)" + # postJobCommandViaSidecar: true + + ## visibilities section can be used to set labels, annotations and service type for any service with the specified visibility + visibilities: + cluster: + type: ClusterIP + local: + annotations: + # This annotation will make azure load balancer use an internal rather than an internet-visible address + # May want different values on different cloud providers or use-cases. 
For example on AWS you may want to use
+        #service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+        service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+      type: LoadBalancer
+      # If ingress is specified, an ingress Network Policy will be created for any pod implementing a service with this visibility
+      # Default allows ingress from anywhere, but more restrictive rules can be used if preferred.
+      # Ingress rules can also be overridden by individual services
+      ingress:
+      - {}
+    global:
+      #labels:
+      #  mylabel: "4"
+      type: LoadBalancer
+      ingress:
+      - {}
+      ## CIDRs allowed to access this service.
+      #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32]
+
+  # example expert section. The sysctl list will be applied to each pod in a privileged init container
+  # expert:
+  #   sysctl:
+  #   - kernel.dmesg_restrict=0
+
+# For pod placement instruction and examples please reference docs/placements.md
+# The following is for tolerations of Spot Node Pool on Azure. Other cloud providers
+# may have different taints for Spot Node Pool. The tolerations are harmless when
+# there is no taint on the node pool.
+#placements:
+#  - pods: ["all"]
+#    placement:
+#      tolerations:
+#      - key: "kubernetes.azure.com/scalesetpriority"
+#        operator: "Equal"
+#        value: "spot"
+#        effect: "NoSchedule"
+
+security:
+  eclSecurity:
+    # Possible values:
+    # allow - functionality is permitted
+    # deny - functionality is not permitted
+    # allowSigned - functionality permitted only if code signed
+    embedded: "allow"
+    pipe: "allow"
+    extern: "allow"
+    datafile: "allow"
+
+## storage:
+##
+## 1. If an engine component has the dataPlane property set, then that plane will be the default data location for that component.
+## 2. If there is a plane definition with a category of "data" then the first matching plane will be the default data location
+##
+## If a data plane contains the storageClass property then an implicit pvc will be created for that data plane.
+##
+## If plane.pvc is defined, a Persistent Volume Claim must exist with that name, storageClass and storageSize are not used.
+##
+## If plane.storageClass is defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If set to "", the default provisioner is chosen (gp2 on AWS, standard on GKE, AWS & OpenStack)
+##
+## plane.forcePermissions=true is required by some types of provisioned
+## storage, where the mounted filing system has insufficient permissions to be
+## read by the hpcc pods. Examples include using hostpath storage (e.g. on
+## minikube and docker for desktop), or using NFS mounted storage.
+
+storage:
+  planes:
+  # name:
+  #   prefix:           # Root directory for accessing the plane (if pvc defined), or url to access plane.
+  #   category: data|dali|lz|dll|spill|temp # What category of data is stored on this plane?
+  #
+  #   For dynamic pvc creation:
+  #   storageClass: ''
+  #   storageSize: 1Gi
+  #
+  #   For persistent storage:
+  #   pvc:              # The name of the persistent volume claim
+  #   forcePermissions: false
+  #   hosts: [ ]        # Inline list of hosts
+  #   hostGroup:        # Name of the host group for bare metal - must match the name of the storage plane.
+  #
+  #   Other options:
+  #   subPath:          # Optional sub directory within <prefix> to use as the root directory
+  #   numDevices: 1     # number of devices that are part of the plane
+  #   secret:           # what secret is required to access the files. This could optionally become a list if required (or add secrets:).
+  #   defaultSprayParts: 4 # The number of partitions created when spraying (default: 1)
+  #   eclwatchVisible: true # Can the lz plane be visible from ECLWatch (default: true)
+  #   cost:             # The storage cost
+  #     storageAtRest: 0.0135 # Storage at rest cost: cost per GiB/month
+  #   storageapi:       # Optional information to allow access to storage api
+  #     type: azurefile | azureblob
+  #     account:        # azure storage account name
+  #     secret:         # secret name (under secrets/storage) for accessing SAS token
+  #     containers: [ ] # a list of containers
+
+  - name: dali
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/dalistorage"
+    category: dali
+  - name: sasha
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/sashastorage"
+    category: sasha
+  - name: dll
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/queries"
+    category: dll
+  - name: data
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/hpcc-data"
+    category: data
+  - name: mydropzone
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/mydropzone"
+    category: lz
+  - name: debug
+    disabled: False
+    storageClass: ""
+    storageSize: 1Gi
+    prefix: "/var/lib/HPCCSystems/debug"
+    category: debug
+
+## The certificates section can be used to enable cert-manager to generate TLS certificates for each component in the hpcc.
+## You must first install cert-manager to use this feature.
+## https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm
+##
+## The Certificate issuers are divided into "local" (those which will be used for local mutual TLS) and "public" (those
+## which will be publicly accessible and therefore need to be recognized by browsers and/or other entities).
+##
+## Both public and local issuers have a spec section. The contents of the "spec" are documented in the cert-manager
+## "Issuer configuration" documentation. https://cert-manager.io/docs/configuration/#supported-issuer-types
+##
+## The default configuration is meant to provide reasonable functionality without additional dependencies.
+##
+## Public issuers can be tricky if you want browsers to recognize the certificates. This is a complex topic outside the scope
+## of this comment. The default for the public issuer generates self-signed certificates. The expectation is that this will be
+## overridden by the configuration of an external certificate authority or vault in QA and production environments.
+##
+## The default for the local (mTLS) issuer is designed to act as our own local certificate authority. We only need to recognize
+## what a component is, and that it belongs to this cluster.
+## But a kubernetes secret must be provided for the certificate authority key-pair. The default name for the secret
+## is "hpcc-local-issuer-key-pair". The secret is a standard kubernetes.io/tls secret and should provide data values for
+## "tls.crt" and "tls.key".
+##
+## The local issuer can also be configured to use an external certificate authority or vault.
+##
+certificates:
+  enabled: false
+  issuers:
+    local:
+      name: hpcc-local-issuer
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-local-issuer-key-pair
+    public:
+      name: hpcc-public-issuer
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        selfSigned: {}
+    vaultclient:
+      name: hpcc-vaultclient-issuer
+      enabled: false
+      ## domain: hpcc.example.com
+      rolePrefix: "hpcc-"
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-vaultclient-issuer-key-pair
+    remote:
+      name: hpcc-remote-issuer
+      ## set enabled to true if adding remoteClients for any components
+      enabled: false
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-remote-issuer-key-pair
+    signing: # intended to be used for signing/verification purposes only, e.g. by dafilesrv
+      name: hpcc-signing-issuer
+      ## kind can be changed to ClusterIssuer to refer to a ClusterIssuer. https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer
+      kind: Issuer
+      ## do not define spec (set spec: null), to reference an Issuer resource that already exists in the cluster
+      ## change spec if you'd like to change how certificates get issued... see
+      ## https://cert-manager.io/docs/configuration/#supported-issuer-types
+      ## for information on what spec should contain.
+      spec:
+        ca:
+          secretName: hpcc-signing-issuer-key-pair
+
+## The secrets section contains a set of categories, each of which contains a list of secrets. The categories determine which
+## components have access to the secrets.
+## For each secret:
+##   name is the name that it is accessed by within the platform
+##   secret is the name of the secret that should be published
+secrets:
+  #timeout: 300 # timeout period for cached secrets. Should be similar to the k8s refresh period.
+
+  #Secret categories follow, remove the {} if a secret is defined in a section
+  storage: {}
+    ## Secrets that are required for accessing storage. Currently exposed in the engines, but in the future will
+    ## likely be restricted to esp (when it becomes the meta-data provider)
+    ## For example, to set the secret associated with the azure storage account "mystorageaccount" use
+    ##azure-mystorageaccount: storage-myazuresecret
+
+  authn: {}
+    ## Category to deploy authentication secrets to container, and to create a key name alias to reference those secrets
+    #ldapadmincredskey: "admincredssecretname" ## Default k/v for LDAP authentication secrets
+    #testauthusercreds1: "testauthusercreds1" ## Default k/v for test authentication secrets
+    #testauthusercreds2: "testauthusercreds2" ## Default k/v for test authentication secrets
+
+  ecl: {}
+    ## Category for secrets published to all components that run ecl. These secrets are for use by internal
+    ## ECL processing. For example HTTPCALL and SOAPCALL have built in support for secrets that are not directly
+    ## accessible to users, that is, not accessed directly via ECL code.
+
+  eclUser: {}
+    ## Category for secrets accessible via ecl code. These are secrets that users can access directly. Be cautious about
+    ## what secrets you add to this category as they are easily accessed by ECL code.
+
+  codeSign: {}
+    #gpg-private-key-1: codesign-gpg-key-1
+    #gpg-private-key-2: codesign-gpg-key-2
+
+  codeVerify: {}
+    #gpg-public-key-1: codesign-gpg-public-key-1
+    #gpg-public-key-2: codesign-gpg-public-key-2
+
+  system: {}
+    ## Category for secrets published to all components for system level usage
+
+  git: {}
+    ## Category to provide passwords for eclccserver to access private git repos
+
+## The vaults section mirrors the secret section but leverages vault for the storage of secrets.
+## There is an additional category for vaults named "eclUser". "eclUser" vault
+## secrets are readable directly from ECL code. Other secret categories are read internally
+## by system components and not exposed directly to ECL code.
+##
+## For each vault:
+##   name is the name that it is accessed by within the platform
+##   url is the url used to read a secret from the vault.
+##   kind is the type of vault being accessed, or the protocol to use to access the secrets
+##   client_secret is a kubernetes level secret that contains the client_token used to retrieve secrets.
+##   If a client_secret is not provided "vault kubernetes auth" will be attempted.
+
+vaults:
+  storage:
+  git:
+  authn:
+  ecl:
+    # vault using vault client certs or kubernetes auth depending on whether certificates.issuers.vaultclient.enabled is true
+    # to use approle authentication specify appRoleId and appRoleSecret
+    # - name: my-ecl-vault
+    #   #Note the data node in the URL is there for the REST APIs use. The path inside the vault starts after /data
+    #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/ecl/${secret}
+    #   kind: kv-v2
+    #   namespace:
+  eclUser:
+    # vault using vault client certs or kubernetes auth depending on whether certificates.issuers.vaultclient.enabled is true
+    # to use approle authentication specify appRoleId and appRoleSecret
+    # - name: my-eclUser-vault
+    #   #Note the data node in the URL is there for the REST APIs use. The path inside the vault starts after /data
+    #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/eclUser/${secret}
+    #   kind: kv-v2
+  esp:
+
+  ## The keys for code signing may be imported from the vault. Multiple keys may be imported.
+  ## gpg keys may be imported as follows:
+  ## vault kv put secret/codeSign/gpg-private-key-1 passphrase=<passphrase> private=@<private-key-file>
+  ## vault kv put secret/codeSign/gpg-private-key-2 passphrase=<passphrase> private=@<private-key-file>
+  codeSign:
+    # - name: codesign-private-keys
+    #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/codeSign/${secret}
+    #   kind: kv-v2
+    #   namespace: mynamespace # for use with enterprise vaults segmented by namespaces
+  ## The keys for verifying signed code may be imported from the vault.
+  ## vault kv put secret/codeVerify/gpg-public-key-1 public=@<public-key-file>
+  ## vault kv put secret/codeVerify/gpg-public-key-2 public=@<public-key-file>
+  codeVerify:
+    # - name: codesign-public-keys
+    #   url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/codeVerify/${secret}
+    #   kind: kv-v2
+    #   namespace: mynamespace # for use with enterprise vaults segmented by namespaces
+
+bundles: []
+## Specifying bundles here will cause the indicated bundles to be downloaded and installed automatically
+## whenever an eclccserver pod is started
+# for example
+# - name: DataPatterns
+
+# A dafilesrv 'stream' service is required to expose HPCC file access to 3rd parties (e.g. Spark / Java)
+# Access will only be granted to requests that have been signed by the DFUFileAccess service
+dafilesrv:
+- name: rowservice
+  disabled: true # disabled by default because requires cert-manager etc. (see certificates section)
+  application: stream
+  service:
+    servicePort: 7600
+    visibility: local
+
+# Enable if bare-metal systems require read access to this systems' data planes via ~remote::
+# If legacy ~foreign:: access is required, Dali will also need to be exposed via a service definition in the dali configuration
+# NB: ingress rules should be added to limit access.
+- name: direct-access
+  disabled: true
+  application: directio
+  service:
+    servicePort: 7200
+    visibility: local
+
+- name: spray-service
+  application: spray
+  service:
+    servicePort: 7300
+    visibility: cluster
+
+
+dali:
+- name: mydali
+  auth: none
+  services: # internal housekeeping services
+    coalescer:
+      service:
+        servicePort: 8877
+      #interval: 2 # (hours)
+      #at: "* * * * *" # cron type schedule, i.e. Min(0-59) Hour(0-23) DayOfMonth(1-31) Month(1-12) DayOfWeek(0-6)
+      #minDeltaSize: 50 # (Kb) will not start coalescing until delta log is above this threshold
+      resources:
+        cpu: "1"
+        memory: "10G"
+
+  resources:
+    cpu: "2"
+    memory: "20G"
+
+sasha:
+  #disabled: true # disable all services. Alternatively set sasha to null (sasha: null)
+  wu-archiver:
+    #disabled: true
+    service:
+      servicePort: 8877
+    plane: sasha
+    #interval: 6 # (hours)
+    #limit: 1000 # threshold number of workunits before archiving starts (0 disables)
+    #cutoff: 8 # minimum workunit age to archive (days)
+    #backup: 0 # minimum workunit age to backup (days, 0 disables)
+    #at: "* * * * *"
+    #duration: 0 # (maxDuration) - Maximum duration to run WorkUnit archiving session (hours, 0 unlimited)
+    #throttle: 0 # throttle ratio (0-99, 0 no throttling, 50 is half speed)
+    #retryinterval: 7 # minimal time before retrying archive of failed WorkUnits (days)
+    #keepResultFiles: false # option to keep result files owned by workunits after workunit is archived
+    resources:
+      cpu: "1"
+      memory: "4Gi"
+
+  dfuwu-archiver:
+    #disabled: true
+    service:
+      servicePort: 8877
+    plane: sasha
+    #forcePermissions: false
+    #limit: 1000 # threshold number of DFU workunits before archiving starts (0 disables)
+    #cutoff: 14 # minimum DFU workunit age to archive (days)
+    #interval: 24 # minimum interval between running DFU recovery archiver (in hours, 0 disables)
+    #at: "* * * * *" # schedule to run DFU workunit archiver (cron format)
+    #duration: 0 # (maxDuration) maximum duration to run DFU WorkUnit archiving session (hours, 0 unlimited)
+    #throttle: 0 # throttle ratio (0-99, 0 no throttling, 50 is half speed)
+    resources:
+      cpu: "1"
+      memory: "4Gi"
+
+  dfurecovery-archiver:
+    #disabled: true
+    #limit: 20 # threshold number of DFU recovery items before archiving starts (0 disables)
+    #cutoff: 4 # minimum DFU recovery item age to archive (days)
+    #interval: 12 # minimum interval between running DFU recovery archiver (in hours, 0 disables)
+    #at: "* * * * *" # schedule to run DFU recovery archiver (cron format)
+    resources:
+      cpu: "1"
+      memory: "4Gi"
+
+  file-expiry:
+    #disabled: true
+    #interval: 1
+    #at: "* 3 * * *"
+    #persistExpiryDefault: 7
+    #expiryDefault: 4
+    #user: sasha
+    resources:
+      cpu: "1"
+      memory: "4Gi"
+
+dfuserver:
+- name: dfuserver
+  maxJobs: 1
+  resources:
+    cpu: "1"
+    memory: "1800Mi"
+
+eclagent:
+- name: hthor
+  ## replicas indicates how many eclagent pods should be started
+  replicas: 1
+  ## maxActive controls how many workunits may be active at once (per replica)
+  maxActive: 4
+  ## prefix may be used to set a filename prefix applied to any relative filenames used by jobs submitted to this queue
+  prefix: hthor
+  ## Set to false if you want to launch each workunit in its own container, true to run as child processes in eclagent pod
+  useChildProcesses: false
+  ## type may be 'hthor' (the default) or 'roxie', to specify that the roxie engine rather than the hthor engine should be used for eclagent workunit processing
+  type: hthor
+  ## The following resources apply to child hThor pods when useChildProcesses=false, otherwise they apply to hThor pod.
+  resources:
+    cpu: "1"
+    memory: "1G"
+  stubResources:
+    cpu: "100m"
+    memory: "100Mi"
+  #egress: engineEgress
+
+- name: roxie-workunit
+  replicas: 1
+  prefix: roxie_workunit
+  maxActive: 20
+  useChildProcesses: true
+  type: roxie
+  #resources:
+  #  cpu: "1"
+  #  memory: "1G"
+  #egress: engineEgress
+  resources:
+    cpu: "1"
+    memory: "1G"
+  stubResources:
+    cpu: "100m"
+    memory: "100Mi"
+
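+# For illustration (assuming the hpcc.addResources helper in this patch, with
+# resourceWholeCpusWithLimits=true as set above): hthor's whole-cpu value
+# (cpu: "1") should be emitted as a cpu limit, while the fractional stub value
+# (cpu: "100m") should remain a cpu request, roughly:
+#
+#   resources:
+#     limits:
+#       cpu: "1"
+#       memory: "1G"
+#
+#   resources:
+#     limits:
+#       memory: "100Mi"
+#     requests:
+#       cpu: "100m"
+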
+eclccserver:
+- name: myeclccserver
+  replicas: 1
+  ## Set to false if you want to launch each workunit compile in its own container, true to run as child processes in eclccserver pod.
+  useChildProcesses: false
+  ## If non-zero, and useChildProcesses is false, try spending up to this number of seconds compiling using a child process before switching to
+  ## a separate container. Speeds up throughput of small jobs.
+  childProcessTimeLimit: 10
+  ## maxActive controls how many workunit compiles may be active at once (per replica)
+  maxActive: 4
+  ## Specify a list of queues to listen on if you don't want this eclccserver listening on all queues. If empty or missing, listens on all queues
+  listen: []
+  ## The following allows eclcc options (names start with a -) and debug options to be defined for each of the workunits that are compiled.
+  #options:
+  #- name: globalAutoHoist
+  #  value: false
+  #  cluster: name # optional cluster this is applied to
+
+  # used to configure the authentication for git when using the option to compile from a repo. Also requires an associated secret.
+  #gitUsername:
+
+  ## The following resources apply to child compile pods when useChildProcesses=false, otherwise they apply to eclccserver pod.
+  resources:
+    cpu: "1"
+    memory: "20Gi"
+  timedChildResources:
+    cpu: "1"
+    memory: "798Mi"
+
+esp:
+- name: eclwatch
+  ## Pre-configured esp applications include eclwatch, eclservices, and eclqueries
+  application: eclwatch
+  auth: none
+  replicas: 1
+  resources:
+    cpu: "4"
+    memory: "8G"
+  ## The following 'corsAllowed' section is used to configure CORS support
+  ## origin - the origin to support CORS requests from
+  ## headers - the headers to allow for the given origin via CORS
+  ## methods - the HTTP methods to allow for the given origin via CORS
+  ##
+  #corsAllowed:
+  ## origin starting with https will only allow https CORS
+  #- origin: https://*.my.com
+  #  headers:
+  #  - "X-X"
+  #  methods:
+  #  - "GET"
+  #  - "OPTIONS"
+  ## origin starting with http will allow http or https CORS
+  #- origin: http://www.example.com
+  #  headers:
+  #  - "*"
+  #  methods:
+  #  - "GET"
+  #  - "POST"
+  #  - "OPTIONS"
+
+# Add remote clients to generated client certificates and make the ESP require that one of the generated certificates is provided by a client in order to connect
+# When setting up remote clients make sure that certificates.issuers.remote.enabled is set to true.
+# remoteClients:
+# - name: petfoodapplicationprod
+#   organization: petfoodDept
+#   secretTemplate:
+#     annotations:
+#       kubed.appscode.com/sync: "hpccenv=petfoodAppProd" # use kubed config-syncer to replicate certificate to namespace with matching annotation (also supports syncing with separate aks clusters)
+
+# trustClients and remoteClients can be combined. Trust is far easier to manage and should now be the preferred mechanism.
+# Trust is similar to remoteClients, but unlike remoteClients, the client certificates are generated elsewhere.
+# If trust is present then esp will use mtls, with trust controlled by certificates.issuers.remote, which must be enabled.
+# When using trustClients the remote issuer of each environment should point to the same certificate authority.
+# Verification of identity is automatic if the CA matches, but only the clients listed here are actually allowed access
+# trustClients:
+# - commonName: rabc.example.com
+
+  service:
+    ## port can be used to change the local port used by the pod. If omitted, the default port (8880) is used
+    port: 8888
+    ## servicePort controls the port that this service will be exposed on, either internally to the cluster, or externally
+    servicePort: 8010
+    ## wsdlAddress should be set to the host and port which clients can use to hit this service.
+    # This address is added to the service wsdl files which simplify setting up a SOAP client to hit this service. There may be many external factors determining the address
+    # that is accessible to clients.
+    # wsdlAddress: clientfacingaddress:8010
+    ## Specify visibility: local (or global) if you want the service available from outside the cluster. Typically, eclwatch and wsecl are published externally, while eclservices is designed for internal use.
+    visibility: local
+    ## Annotations can be specified on a service - for example to specify provider-specific information such as service.beta.kubernetes.io/azure-load-balancer-internal-subnet
+    #annotations:
+    #  service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "mysubnet"
+    # The service.annotations prefixed with hpcc.eclwatch.io should not be declared here. They can be declared
+    # in other services in order to be exposed in the ECLWatch interface. Similar functionality can be used by other
+    # applications. For other applications, the "eclwatch" inside the service.annotations should be replaced by
+    # their application names.
+    # hpcc.eclwatch.io/enabled: "true"
+    # hpcc.eclwatch.io/description: "some description"
+    ## You can also specify labels on a service
+    #labels:
+    #  mylabel: "3"
+    ## Links specify the web links for a service. The web links may be shown on ECLWatch.
+    #links:
+    #- name: linkname
+    #  description: "some description"
+    #  url: "http://abc.com/def?g=1"
+    ## CIDRs allowed to access this service.
+    #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32]
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 70M
+  #resources:
+  #  cpu: "1"
+  #  memory: "2G"
+- name: eclservices
+  application: eclservices
+  auth: none
+  replicas: 1
+  service:
+    servicePort: 8010
+    visibility: cluster
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: eclqueries
+  application: eclqueries
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8002
+    #annotations:
+    #  hpcc.eclwatch.io/enabled: "true"
+    #  hpcc.eclwatch.io/description: "Roxie Test page"
+    #  hpcc.eclwatch.io/port: "8002"
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: esdl-sandbox
+  application: esdl-sandbox
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8899
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: sql2ecl
+  application: sql2ecl
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8510
+    #domain: hpccsql.com
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+- name: dfs
+  application: dfs
+  auth: none
+  replicas: 1
+  service:
+    visibility: local
+    servicePort: 8520
+    # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size
+    # default for EclWatch is 60M, default for other services is 8M
+    #maxRequestEntityLength: 9M
+  #resources:
+  #  cpu: "250m"
+  #  memory: "1G"
+
+
+#- name: ldapenvironment
+  #ldapenvironment is a standalone ESP service used to help stand up new HPCC LDAP Environments
+#  application: ldapenvironment
+#  auth: ldap
+#  #specify the hpcc branch Root Name
+#  hpccRootName: ou=hpcc,dc=myldap,dc=com
+#  #specify all BaseDN with your LDAP Server's "dc=" settings
+#  sharedFilesBaseDN: ou=files,ou=hpcc,dc=myldap,dc=com
+#  sharedGroupsBaseDN: ou=groups,ou=hpcc,dc=myldap,dc=com
+#  sharedUsersBaseDN: ou=users,ou=hpcc,dc=myldap,dc=com
+#  sharedResourcesBaseDN: ou=smc,ou=espservices,ou=hpcc,dc=myldap,dc=com
+#  sharedWorkunitsBaseDN: ou=workunits,ou=hpcc,dc=myldap,dc=com
+#  adminGroupName: HPCCAdmins
+#  replicas: 1
+#  service:
+#    visibility: local
+#    servicePort: 8511
+
+roxie:
+- name: roxie
+  disabled: false
+  prefix: roxie
+  services:
+  - name: roxie
+    servicePort: 9876
+    listenQueue: 200
+    numThreads: 30
+    visibility: local
+#    trustClients:
+#    - commonName: rabc.example.com
+#    - commonName: rbcd.example.com
+    # Can override ingress rules for each service if desired - for example to add no additional ingress permissions you can use
+    # ingress: []
+
+# Trust is similar to remoteClients, but unlike remoteClients, the client certificates are generated elsewhere.
+# If trust is present then roxie will use mtls with trust controlled by certificates.issuer.remote.
+# Using the trust section the remote issuer of each environment should point to the same certificate authority.
+# Verification of identity is automatic if the CA matches, but only the clients listed here are actually allowed access
+# trust:
+# - commonName: abc.example.com
+# - commonName: bcd.example.com
+
+  ## replicas indicates the number of replicas per channel
+  replicas: 2
+  numChannels: 2
+  ## Set singleNode to true for a scalable cluster of "single-node" roxie servers, each implementing all channels locally
+  singleNode: false
+  ## Adjust traceLevel to taste (1 is default)
+  traceLevel: 1
+  ## set totalMemoryLimit to indicate how much memory is preallocated for roxie row data
+  # totalMemoryLimit: "1Gi" # Default 1Gi, capped to 75% of resources.memory if defined.
+  ## Set mtuPayload to the maximum amount of data Roxie will put in a single packet. This should be just less than the system MTU. Default is 1400
+  # mtuPayload: 3800
+
+  ## resources specifies the resources required by each agent pod
+  resources:
+    cpu: "8"
+    memory: "12G"
+  topoResources:
+    cpu: "789m"
+    memory: "543Mi"
+  serverResources:
+    cpu: "2"
+    memory: "8G"
+  channelResources:
+    cpu: "4"
+    memory: "6Gi"
+
+  ## Set serverReplicas to indicate a separate replicaSet of roxie servers, with agent pods not acting as servers
+  serverReplicas: 0
+  ## If serverReplicas is set, the resources required for the server pods can be configured separately from the agent (channel) pods
+  #serverResources:
+  #  cpu: "1"
+  #  memory: "4Gi"
+  #channelResources:
+  #  cpu: "2"
+  #  memory: "8Gi"
+
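+  # For illustration (assuming the hpcc.addResources helper in this patch):
+  # because resourceWholeCpusWithLimits is true, the whole-cpu values above
+  # ("8", "2" and "4") should all be emitted as cpu limits, while the
+  # fractional topoResources cpu ("789m") should remain a cpu request. The
+  # blanket resourceCpusWithLimits option would force every cpu value into
+  # limits, which the schema notes can lead to painful throttling on roxie.
+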
+  # Roxie may take a while to start up if there are a lot of queries to load. You may need to
+  #override the default startup/readiness probing by setting these values
+  #minStartupTime: 30 # How long to wait before initiating startup probing
+  #maxStartupTime: 600 # Maximum time to wait for startup to complete before failing
+  topoServer:
+    replicas: 1
+  #directAccessPlanes: [] #add direct access planes that roxie will read from without copying the data to its default data plane
+  #ldapUser: roxie_file_access #add system username for accessing files
+  #egress: engineEgress
+
+## The [manager/worker/eclAgent]Resources define the resource limits for each container.
+## If numWorkersPerPod is >1, it must be a factor of numWorkers.
+## NB: Each worker corresponds to a container that will be resourced according to
+## workerResources, meaning that if numWorkersPerPod>1, N * workerResources.cpu,
+## N * workerResources.memory etc., will be required in total for the pod.
+##
+## By default the available Thor memory will be based on the resourced container memory.
+## This can be overridden by setting [worker/manager]Memory.query and
+## [worker/manager]Memory.thirdParty.
+thor:
+- name: thor
+  prefix: thor
+  numWorkers: 2
+  maxJobs: 4
+  maxGraphs: 2
+  #maxGraphStartupTime: 600
+  #numWorkersPerPod: 1
+  managerResources:
+    cpu: "1"
+    memory: "2G"
+  workerResources:
+    cpu: "4"
+    memory: "4G"
+  #workerMemory:
+  #  query: "3G"
+  #  thirdParty: "500M"
+  eclAgentResources:
+    cpu: "1"
+    memory: "432M"
+  #egress: engineEgress
+
+eclscheduler:
+- name: eclscheduler
+  resources:
+    cpu: "567m"
+    memory: "4321M"
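+
+# Note: the whole-cpu test is done in milliCPU (via the
+# hpcc.k8sCPUStringToMilliCPU helper used by this patch), so a value such as
+# global.stubInstanceResources.cpu: "1000m" above counts as a whole cpu
+# (1000 mod 1000 == 0) and should be emitted as a cpu limit, while the
+# fractional "567m" here should render roughly as:
+#
+#   resources:
+#     limits:
+#       memory: "4321M"
+#     requests:
+#       cpu: "567m"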