diff --git a/.gitleaksignore b/.gitleaksignore
new file mode 100644
index 0000000..d043ec7
--- /dev/null
+++ b/.gitleaksignore
@@ -0,0 +1 @@
+cbb0d660a09cc0c47851ef59ac7f0c5fca177371:terraform/vmo-cluster/manifests/k8s-values.yaml:generic-api-key:114
diff --git a/terraform/vmo-cluster/README.md b/terraform/vmo-cluster/README.md
new file mode 100644
index 0000000..f62ee05
--- /dev/null
+++ b/terraform/vmo-cluster/README.md
@@ -0,0 +1,74 @@
+# Deploy and Manage VMs using Palette VMO
+
+This folder contains the demo code for the **Deploy and Manage VMs using Palette VMO** tutorial.
+
+The Terraform code has two main toggle variables that you can use to deploy resources to [Canonical MAAS](https://maas.io/docs).
+
+| Variable | Provider | Description | Default |
+| ---------------- | -------- | ------------------------------------------------- | ------- |
+| `deploy-maas` | MAAS | Enable to deploy a cluster to MAAS. | `false` |
+| `deploy-maas-vm` | MAAS | Enable to deploy a VM to a deployed MAAS cluster. | `false` |
+
+
+To get started, open the **terraform.tfvars** file. Toggle the provider variables as described in the table above and provide values for your cloud provider variables, replacing all instances of the string `REPLACE ME`.
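+
+For reference, the following is a minimal sketch of what the MAAS section of **terraform.tfvars** might look like once the cluster deployment is enabled. The variable names come from **inputs.tf**; every value shown is a placeholder that you must replace with details from your own environment.
+
+```hcl
+palette-project = "REPLACE ME"
+
+deploy-maas    = true
+deploy-maas-vm = false # Enable only after the MAAS cluster is deployed.
+
+pcg-name    = "REPLACE ME"
+maas-domain = "REPLACE ME"
+
+maas-control-plane-nodes         = 1
+maas-control-plane-resource-pool = "REPLACE ME"
+maas-control-plane-azs           = ["REPLACE ME"]
+maas-control-plane-node-tags     = ["REPLACE ME"]
+
+maas-worker-nodes         = 1
+maas-worker-resource-pool = "REPLACE ME"
+maas-worker-azs           = ["REPLACE ME"]
+maas-worker-node-tags     = ["REPLACE ME"]
+```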
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.9 |
+| [local](#requirement\_local) | 2.4.1 |
+| [spectrocloud](#requirement\_spectrocloud) | >= 0.22.2 |
+| [tls](#requirement\_tls) | 4.0.4 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [local](#provider\_local) | 2.4.1 |
+| [spectrocloud](#provider\_spectrocloud) | 0.22.2 |
+| [tls](#provider\_tls) | 4.0.4 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [spectrocloud_cluster_profile.maas-vmo-profile](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_profile) | resource |
+| [spectrocloud_cluster_maas.maas-cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_maas) | resource |
+| [spectrocloud_virtual_machine.virtual-machine](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/virtual_machine) | resource |
+| [spectrocloud_cloudaccount_maas.account](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/cloudaccount_maas) | data source |
+| [spectrocloud_pack.maas_vmo](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_cni](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_csi](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_k8s](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_ubuntu](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_cluster.maas_vmo_cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/cluster) | data source |
+| [spectrocloud_registry.public_registry](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/registry) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [deploy-maas](#input\_deploy-maas) | A flag for enabling a cluster deployment on MAAS. | `bool` | n/a | yes |
+| [deploy-maas-vm](#input\_deploy-maas-vm) | A flag for enabling VM creation on a deployed MAAS cluster. | `bool` | n/a | yes |
+| [pcg-name](#input\_pcg-name) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
+| [maas-domain](#input\_maas-domain) | The MAAS domain that will be used to deploy the cluster. | `string` | n/a | yes |
+| [maas-worker-nodes](#input\_maas-worker-nodes) | The number of worker nodes that will be used to deploy the cluster. | `number` | `1` | no |
+| [maas-control-plane-nodes](#input\_maas-control-plane-nodes) | The number of control plane nodes that will be used to deploy the cluster. | `number` | `1` | no |
+| [maas-worker-resource-pool](#input\_maas-worker-resource-pool) | The resource pool to deploy the worker nodes to. | `string` | n/a | yes |
+| [maas-control-plane-resource-pool](#input\_maas-control-plane-resource-pool) | The resource pool to deploy the control plane nodes to. | `string` | n/a | yes |
+| [maas-worker-azs](#input\_maas-worker-azs) | The set of availability zones to deploy the worker nodes to. | `set(string)` | n/a | yes |
+| [maas-control-plane-azs](#input\_maas-control-plane-azs) | The set of availability zones to deploy the control plane nodes to. | `set(string)` | n/a | yes |
+| [maas-worker-node-tags](#input\_maas-worker-node-tags) | The set of tag values that you want to apply to all nodes in the worker node pool. | `set(string)` | n/a | yes |
+| [maas-control-plane-node-tags](#input\_maas-control-plane-node-tags) | The set of tag values that you want to apply to all nodes in the control plane node pool. | `set(string)` | n/a | yes |
+| [tags](#input\_tags) | The default tags to apply to Palette resources. | `list(string)` | `["spectro-cloud-education", "spectrocloud:tutorials", "terraform_managed:true", "tutorial:vmo-cluster-deployment"]` | no |
+
+## Outputs
+
+No outputs.
+
+
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
new file mode 100644
index 0000000..04bfbf7
--- /dev/null
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -0,0 +1,68 @@
+
+##########################
+# MAAS VMO Cluster Profile
+##########################
+resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
+ count = var.deploy-maas ? 1 : 0
+
+ name = "tf-maas-vmo-profile"
+ description = "A basic cluster profile for MAAS VMO"
+ tags = concat(var.tags, ["env:maas"])
+ cloud = "maas"
+ type = "cluster"
+ version = "1.0.0"
+
+ pack {
+ name = data.spectrocloud_pack.maas_ubuntu.name
+ tag = data.spectrocloud_pack.maas_ubuntu.version
+ uid = data.spectrocloud_pack.maas_ubuntu.id
+ values = file("manifests/ubuntu-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_k8s.name
+ tag = data.spectrocloud_pack.maas_k8s.version
+ uid = data.spectrocloud_pack.maas_k8s.id
+ values = file("manifests/k8s-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_cni.name
+ tag = data.spectrocloud_pack.maas_cni.version
+ uid = data.spectrocloud_pack.maas_cni.id
+ values = file("manifests/cni-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_csi.name
+ tag = data.spectrocloud_pack.maas_csi.version
+ uid = data.spectrocloud_pack.maas_csi.id
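+ # Render csi-values.yaml with templatefile so the CSI manifest can reference the worker node count.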
+ values = templatefile("manifests/csi-values.yaml", {
+ worker_nodes = var.maas-worker-nodes,
+ })
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_vmo.name
+ tag = data.spectrocloud_pack.maas_vmo.version
+ uid = data.spectrocloud_pack.maas_vmo.id
+ values = file("manifests/vmo-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = "vmo-extras"
+ type = "manifest"
+ tag = "1.0.0"
+ values = file("manifests/vmo-extras-values.yaml")
+ manifest {
+ name = "vmo-extras"
+ content = file("manifests/vmo-extras-manifest.yaml")
+ }
+ }
+
+}
diff --git a/terraform/vmo-cluster/clusters.tf b/terraform/vmo-cluster/clusters.tf
new file mode 100644
index 0000000..aecec17
--- /dev/null
+++ b/terraform/vmo-cluster/clusters.tf
@@ -0,0 +1,52 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+################
+# MAAS Cluster
+################
+
+resource "spectrocloud_cluster_maas" "maas-cluster" {
+ count = var.deploy-maas ? 1 : 0
+
+ name = "vmo-cluster-maas"
+ tags = concat(var.tags, ["env:maas"])
+ cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
+ pause_agent_upgrades = "unlock"
+
+ cloud_config {
+ domain = var.maas-domain
+ }
+
+ cluster_profile {
+ id = resource.spectrocloud_cluster_profile.maas-vmo-profile[0].id
+ }
+
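+ # Control plane pool. Each node must provide at least 8 CPUs and 16 GB of memory.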
+ machine_pool {
+ name = "maas-control-plane"
+ count = var.maas-control-plane-nodes
+ control_plane = true
+ azs = var.maas-control-plane-azs
+ node_tags = var.maas-control-plane-node-tags
+ instance_type {
+ min_cpu = 8
+ min_memory_mb = 16000
+ }
+ placement {
+ resource_pool = var.maas-control-plane-resource-pool
+ }
+ }
+
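+ # Worker pool. Each node must provide at least 8 CPUs and 32 GB of memory.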
+ machine_pool {
+ name = "maas-worker-basic"
+ count = var.maas-worker-nodes
+ azs = var.maas-worker-azs
+ node_tags = var.maas-worker-node-tags
+ instance_type {
+ min_cpu = 8
+ min_memory_mb = 32000
+ }
+ placement {
+ resource_pool = var.maas-worker-resource-pool
+ }
+ }
+}
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
new file mode 100644
index 0000000..e820e86
--- /dev/null
+++ b/terraform/vmo-cluster/data.tf
@@ -0,0 +1,57 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+########################################
+# Data resources for the cluster profile
+########################################
+data "spectrocloud_registry" "public_registry" {
+ name = "Public Repo"
+}
+
+######
+# MAAS
+######
+
+data "spectrocloud_cloudaccount_maas" "account" {
+ count = var.deploy-maas ? 1 : 0
+ name = var.pcg-name
+}
+
+data "spectrocloud_pack" "maas_ubuntu" {
+ name = "ubuntu-maas"
+ version = "22.04"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_k8s" {
+ name = "kubernetes"
+ version = "1.30.6"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_cni" {
+ name = "cni-cilium-oss"
+ version = "1.15.3"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_csi" {
+ name = "csi-rook-ceph-helm"
+ version = "1.14.9"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_vmo" {
+ name = "virtual-machine-orchestrator"
+ version = "4.4.10"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
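+# Look up the deployed MAAS cluster so it can be referenced when deploy-maas-vm is enabled.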
+data "spectrocloud_cluster" "maas_vmo_cluster" {
+ count = var.deploy-maas-vm ? 1 : 0
+ depends_on = [spectrocloud_cluster_maas.maas-cluster]
+ name = "vmo-cluster-maas"
+ context = "project"
+}
+
+
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
new file mode 100644
index 0000000..463089b
--- /dev/null
+++ b/terraform/vmo-cluster/inputs.tf
@@ -0,0 +1,148 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+#########
+# Palette
+#########
+
+variable "palette-project" {
+ type = string
+ description = "The name of your project in Palette."
+
+ validation {
+ condition = var.palette-project != ""
+ error_message = "Provide the correct Palette project."
+ }
+
+}
+
+######################
+# Common Configuration
+######################
+
+variable "tags" {
+ type = list(string)
+ description = "The default tags to apply to Palette resources."
+ default = [
+ "spectro-cloud-education",
+ "spectrocloud:tutorials",
+ "terraform_managed:true",
+ "tutorial:vmo-cluster-deployment"
+ ]
+}
+
+######
+# MAAS
+######
+
+variable "deploy-maas" {
+ type = bool
+ description = "A flag for enabling a deployment on MAAS."
+}
+
+variable "deploy-maas-vm" {
+ type = bool
+ description = "A flag for enabling a VM creation on the MAAS cluster."
+}
+
+variable "pcg-name" {
+ type = string
+ description = "The name of the PCG that will be used to deploy the cluster."
+
+ validation {
+ condition = var.deploy-maas ? var.pcg-name != "REPLACE ME" && var.pcg-name != "" : true
+ error_message = "Provide the correct MAAS PCG name."
+ }
+}
+
+variable "maas-domain" {
+ type = string
+ description = "MAAS domain"
+
+ validation {
+ condition = var.deploy-maas ? var.maas-domain != "REPLACE ME" && var.maas-domain != "" : true
+ error_message = "Provide the correct MAAS domain."
+ }
+}
+
+variable "maas-worker-nodes" {
+ type = number
+ description = "Number of MaaS worker nodes"
+ default = 1
+
+ validation {
+ condition = var.deploy-maas ? var.maas-worker-nodes > 0 : true
+ error_message = "Provide a valid number of worker nodes."
+ }
+}
+
+variable "maas-worker-resource-pool" {
+ type = string
+ description = "Resource pool for the MAAS worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.maas-worker-resource-pool != "REPLACE ME" && var.maas-worker-resource-pool != "" : true
+ error_message = "Provide a valid resource pool for worker nodes."
+ }
+}
+
+variable "maas-worker-azs" {
+ type = set(string)
+ description = "Set of AZs for the MAAS worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-worker-azs, "REPLACE ME") && length(var.maas-worker-azs) != 0 : true
+ error_message = "Provide a valid set of AZs for worker nodes."
+ }
+}
+
+variable "maas-worker-node-tags" {
+ type = set(string)
+ description = "Set of node tags for the MAAS worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-worker-node-tags, "REPLACE ME") && length(var.maas-worker-node-tags) != 0 : true
+ error_message = "Provide a valid set of node tags for worker nodes."
+ }
+}
+
+variable "maas-control-plane-nodes" {
+ type = number
+ description = "Number of MaaS control plane nodes"
+ default = 1
+
+ validation {
+ condition = var.deploy-maas ? var.maas-control-plane-nodes > 0 : true
+ error_message = "Provide a valid number of control plane nodes."
+ }
+}
+
+variable "maas-control-plane-resource-pool" {
+ type = string
+ description = "Resource pool for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.maas-control-plane-resource-pool != "REPLACE ME" && var.maas-control-plane-resource-pool != "" : true
+ error_message = "Provide a valid resource pool for worker nodes."
+ }
+}
+
+variable "maas-control-plane-azs" {
+ type = set(string)
+ description = "Set of AZs for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-control-plane-azs, "REPLACE ME") && length(var.maas-control-plane-azs) != 0 : true
+ error_message = "Provide a valid set of AZs for control plane nodes."
+ }
+}
+
+variable "maas-control-plane-node-tags" {
+ type = set(string)
+ description = "Set of node tags for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-control-plane-node-tags, "REPLACE ME") && length(var.maas-control-plane-node-tags) != 0 : true
+ error_message = "Provide a valid set of node tags for control plane nodes."
+ }
+}
diff --git a/terraform/vmo-cluster/manifests/cni-values.yaml b/terraform/vmo-cluster/manifests/cni-values.yaml
new file mode 100644
index 0000000..897c048
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/cni-values.yaml
@@ -0,0 +1,3547 @@
+pack:
+ content:
+ images:
+ - image: quay.io/cilium/certgen:v0.1.9
+ - image: quay.io/cilium/cilium:v1.15.3
+ - image: quay.io/cilium/cilium-envoy:v1.27.3-99c1c8f42c8de70fc8f6dd594f4a425cd38b6688
+ - image: quay.io/cilium/cilium-etcd-operator:v2.0.7
+ - image: quay.io/cilium/clustermesh-apiserver:v1.15.3
+ - image: quay.io/cilium/hubble-relay:v1.15.3
+ - image: quay.io/cilium/hubble-ui:v0.13.0
+ - image: quay.io/cilium/hubble-ui-backend:v0.13.0
+ - image: quay.io/cilium/operator:v1.15.3
+ - image: quay.io/cilium/operator-generic:v1.15.3
+ - image: quay.io/cilium/operator-aws:v1.15.3
+ - image: quay.io/cilium/operator-azure:v1.15.3
+ - image: quay.io/cilium/startup-script:62093c5c233ea914bfa26a10ba41f8780d9b737f
+ - image: ghcr.io/spiffe/spire-agent:1.8.5
+ - image: ghcr.io/spiffe/spire-server:1.8.5
+ - image: docker.io/library/busybox:1.36.1
+
+ charts:
+ - repo: https://helm.cilium.io/
+ name: cilium
+ version: 1.15.3
+ #The namespace (on the target cluster) to install this chart
+ #When not found, a new namespace will be created
+ namespace: kube-system
+
+charts:
+ cilium:
+ # upgradeCompatibility helps users upgrading to ensure that the configMap for
+ # Cilium will not change critical values to ensure continued operation
+ # This flag is not required for new installations.
+ # For example: 1.7, 1.8, 1.9
+ # upgradeCompatibility: '1.8'
+
+ debug:
+ # -- Enable debug logging
+ enabled: false
+ # -- Configure verbosity levels for debug logging
+ # This option is used to enable debug messages for operations related to such
+ # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is
+ # for enabling debug messages emitted per request, message and connection.
+ # Multiple values can be set via a space-separated string (e.g. "datapath envoy").
+ #
+ # Applicable values:
+ # - flow
+ # - kvstore
+ # - envoy
+ # - datapath
+ # - policy
+ verbose: ~
+
+ rbac:
+ # -- Enable creation of Role-Based Access Control (RBAC) configuration.
+ create: true
+
+ # -- Configure image pull secrets for pulling container images
+ imagePullSecrets:
+ # - name: "image-pull-secret"
+
+ # -- (string) Kubernetes config path
+ # @default -- `"~/.kube/config"`
+ kubeConfigPath: ""
+ # -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only)
+ k8sServiceHost: ""
+ # -- (string) Kubernetes service port
+ k8sServicePort: ""
+
+ # -- Configure the client side rate limit for the agent and operator
+ #
+ # If the amount of requests to the Kubernetes API server exceeds the configured
+ # rate limit, the agent and operator will start to throttle requests by delaying
+ # them until there is budget or the request times out.
+ k8sClientRateLimit:
+ # -- (int) The sustained request rate in requests per second.
+ # @default -- 5 for k8s up to 1.26. 10 for k8s version 1.27+
+ qps:
+ # -- (int) The burst request rate in requests per second.
+ # The rate limiter will allow short bursts with a higher rate.
+ # @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+
+ burst:
+
+ cluster:
+ # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE.
+ name: default
+ # -- (int) Unique ID of the cluster. Must be unique across all connected
+ # clusters and in the range of 1 to 255. Only required for Cluster Mesh,
+ # may be 0 if Cluster Mesh is not used.
+ id: 0
+
+ # -- Define serviceAccount names for components.
+ # @default -- Component's fully qualified name.
+ serviceAccounts:
+ cilium:
+ create: true
+ name: cilium
+ automount: true
+ annotations: {}
+ nodeinit:
+ create: true
+ # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented.
+ # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by
+ # this issue. Name and automount can be configured, if enabled is set to true.
+ # Otherwise, they are ignored. Enabled can be removed once the issue is fixed.
+ # Cilium-nodeinit DS must also be fixed.
+ enabled: false
+ name: cilium-nodeinit
+ automount: true
+ annotations: {}
+ envoy:
+ create: true
+ name: cilium-envoy
+ automount: true
+ annotations: {}
+ etcd:
+ create: true
+ name: cilium-etcd-operator
+ automount: true
+ annotations: {}
+ operator:
+ create: true
+ name: cilium-operator
+ automount: true
+ annotations: {}
+ preflight:
+ create: true
+ name: cilium-pre-flight
+ automount: true
+ annotations: {}
+ relay:
+ create: true
+ name: hubble-relay
+ automount: false
+ annotations: {}
+ ui:
+ create: true
+ name: hubble-ui
+ automount: true
+ annotations: {}
+ clustermeshApiserver:
+ create: true
+ name: clustermesh-apiserver
+ automount: true
+ annotations: {}
+ # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob
+ clustermeshcertgen:
+ create: true
+ name: clustermesh-apiserver-generate-certs
+ automount: true
+ annotations: {}
+ # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob
+ hubblecertgen:
+ create: true
+ name: hubble-generate-certs
+ automount: true
+ annotations: {}
+
+ # -- Configure termination grace period for cilium-agent DaemonSet.
+ terminationGracePeriodSeconds: 1
+
+ # -- Install the cilium agent resources.
+ agent: true
+
+ # -- Agent container name.
+ name: cilium
+
+ # -- Roll out cilium agent pods automatically when configmap is updated.
+ rollOutCiliumPods: false
+
+ # -- Agent container image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium"
+ tag: "v1.15.3"
+ pullPolicy: "IfNotPresent"
+ # cilium-digest
+ digest: ""
+ useDigest: false
+
+ # -- Affinity for cilium-agent.
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Node selector for cilium-agent.
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for agent scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- The priority class to use for cilium-agent.
+ priorityClassName: ""
+
+ # -- DNS policy for Cilium agent pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ""
+
+ # -- Additional containers added to the cilium DaemonSet.
+ extraContainers: []
+
+ # -- Additional agent container arguments.
+ extraArgs: []
+
+ # -- Additional agent container environment variables.
+ extraEnv: []
+
+ # -- Additional agent hostPath mounts.
+ extraHostPathMounts: []
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
+
+ # -- Additional agent volumes.
+ extraVolumes: []
+
+ # -- Additional agent volumeMounts.
+ extraVolumeMounts: []
+
+ # -- extraConfig allows you to specify additional configuration parameters to be
+ # included in the cilium-config configmap.
+ extraConfig: {}
+ # my-config-a: "1234"
+ # my-config-b: |-
+ # test 1
+ # test 2
+ # test 3
+
+ # -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent)
+ annotations: {}
+
+ # -- Security Context for cilium-agent pods.
+ podSecurityContext: {}
+
+ # -- Annotations to be added to agent pods
+ podAnnotations: {}
+
+ # -- Labels to be added to agent pods
+ podLabels: {}
+
+ # -- Agent resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ # -- resources & limits for the agent init containers
+ initResources: {}
+
+ securityContext:
+ # -- User to run the pod with
+ # runAsUser: 0
+ # -- Run the pod with elevated privileges
+ privileged: false
+ # -- SELinux options for the `cilium-agent` and init containers
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ # -- Capabilities for the `cilium-agent` container
+ ciliumAgent:
+ # Use to set socket permission
+ - CHOWN
+ # Used to terminate envoy child process
+ - KILL
+ # Used since cilium modifies routing tables, etc...
+ - NET_ADMIN
+ # Used since cilium creates raw sockets, etc...
+ - NET_RAW
+ # Used since cilium monitor uses mmap
+ - IPC_LOCK
+ # Used in iptables. Consider removing once we are iptables-free
+ - SYS_MODULE
+ # We need it for now but might not need it for >= 5.11, especially
+ # for the 'SYS_RESOURCE'.
+ # In >= 5.8 there are already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
+ - SYS_RESOURCE
+ # Both PERFMON and BPF requires kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+ # Allow discretionary access control (e.g. required for package installation)
+ - DAC_OVERRIDE
+ # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
+ - FOWNER
+ # Allow to execute program that changes GID (e.g. required for package installation)
+ - SETGID
+ # Allow to execute program that changes UID (e.g. required for package installation)
+ - SETUID
+ # -- Capabilities for the `mount-cgroup` init container
+ mountCgroup:
+ # Only used for 'mount' cgroup
+ - SYS_ADMIN
+ # Used for nsenter
+ - SYS_CHROOT
+ - SYS_PTRACE
+ # -- capabilities for the `apply-sysctl-overwrites` init container
+ applySysctlOverwrites:
+ # Required in order to access host's /etc/sysctl.d dir
+ - SYS_ADMIN
+ # Used for nsenter
+ - SYS_CHROOT
+ - SYS_PTRACE
+ # -- Capabilities for the `clean-cilium-state` init container
+ cleanCiliumState:
+ # Most of the capabilities here are the same ones used in the
+ # cilium-agent's container because this container can be used to
+ # uninstall all Cilium resources, and therefore it is likely that
+ # will need the same capabilities.
+ # Used since cilium modifies routing tables, etc...
+ - NET_ADMIN
+ # Used in iptables. Consider removing once we are iptables-free
+ - SYS_MODULE
+ # We need it for now but might not need it for >= 5.11, especially
+ # for the 'SYS_RESOURCE'.
+ # In >= 5.8 there are already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
+ - SYS_RESOURCE
+ # Both PERFMON and BPF requires kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+
+ # -- Cilium agent update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 2
+
+ # Configuration Values for cilium-agent
+
+ aksbyocni:
+ # -- Enable AKS BYOCNI integration.
+ # Note that this is incompatible with AKS clusters not created in BYOCNI mode:
+ # use Azure integration (`azure.enabled`) instead.
+ enabled: false
+
+ # -- Enable installation of PodCIDR routes between worker
+ # nodes if worker nodes share a common L2 network segment.
+ autoDirectNodeRoutes: false
+
+ # -- Annotate k8s node upon initialization with Cilium's metadata.
+ annotateK8sNode: false
+
+ azure:
+ # -- Enable Azure integration.
+ # Note that this is incompatible with AKS clusters created in BYOCNI mode: use
+ # AKS BYOCNI integration (`aksbyocni.enabled`) instead.
+ enabled: false
+ # usePrimaryAddress: false
+ # resourceGroup: group1
+ # subscriptionID: 00000000-0000-0000-0000-000000000000
+ # tenantID: 00000000-0000-0000-0000-000000000000
+ # clientID: 00000000-0000-0000-0000-000000000000
+ # clientSecret: 00000000-0000-0000-0000-000000000000
+ # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
+
+ alibabacloud:
+ # -- Enable AlibabaCloud ENI integration
+ enabled: false
+
+ # -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
+ # for rate-limiting traffic from individual Pods with EDT (Earliest Departure
+ # Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
+ bandwidthManager:
+ # -- Enable bandwidth manager infrastructure (also a prerequisite for BBR)
+ enabled: false
+ # -- Activate BBR TCP congestion control for Pods
+ bbr: false
+
+ # -- Configure standalone NAT46/NAT64 gateway
+ nat46x64Gateway:
+ # -- Enable RFC8215-prefixed translation
+ enabled: false
+
+ # -- EnableHighScaleIPcache enables the special ipcache mode for high scale
+ # clusters. The ipcache content will be reduced to the strict minimum and
+ # traffic will be encapsulated to carry security identities.
+ highScaleIPcache:
+ # -- Enable the high scale mode for the ipcache.
+ enabled: false
+
+ # -- Configure L2 announcements
+ l2announcements:
+ # -- Enable L2 announcements
+ enabled: false
+ # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked
+ # leaseDuration: 15s
+ # -- The interval at which the leader will renew the lease
+ # leaseRenewDeadline: 5s
+ # -- The timeout between retries if renewal fails
+ # leaseRetryPeriod: 2s
+
+ # -- Configure L2 pod announcements
+ l2podAnnouncements:
+ # -- Enable L2 pod announcements
+ enabled: false
+ # -- Interface used for sending Gratuitous ARP pod announcements
+ interface: "eth0"
+
+ # -- Configure BGP
+ bgp:
+ # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside
+ # cilium-agent and cilium-operator
+ enabled: false
+ announce:
+ # -- Enable allocation and announcement of service LoadBalancer IPs
+ loadbalancerIP: false
+ # -- Enable announcement of node pod CIDR
+ podCIDR: false
+
+ # -- This feature set enables virtual BGP routers to be created via
+ # CiliumBGPPeeringPolicy CRDs.
+ bgpControlPlane:
+ # -- Enables the BGP control plane.
+ enabled: false
+ # -- SecretsNamespace is the namespace which BGP support will retrieve secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for BGP secrets.
+ create: false
+ # -- The name of the secret namespace to which Cilium agents are given read access
+ name: kube-system
+
+ pmtuDiscovery:
+ # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to
+ # the client.
+ enabled: false
+
+ bpf:
+ autoMount:
+ # -- Enable automatic mount of BPF filesystem
+ # When `autoMount` is enabled, the BPF filesystem is mounted at
+ # `bpf.root` path on the underlying host and inside the cilium agent pod.
+ # If users disable `autoMount`, it's expected that users have mounted
+ # bpffs filesystem at the specified `bpf.root` volume, and then the
+ # volume will be mounted inside the cilium agent pod at the same path.
+ enabled: true
+ # -- Configure the mount point for the BPF filesystem
+ root: /sys/fs/bpf
+
+ # -- Enables pre-allocation of eBPF map values. This increases
+ # memory usage but can reduce latency.
+ preallocateMaps: false
+
+ # -- (int) Configure the maximum number of entries in auth map.
+ # @default -- `524288`
+ authMapMax: ~
+
+ # -- (int) Configure the maximum number of entries in the TCP connection tracking
+ # table.
+ # @default -- `524288`
+ ctTcpMax: ~
+
+ # -- (int) Configure the maximum number of entries for the non-TCP connection
+ # tracking table.
+ # @default -- `262144`
+ ctAnyMax: ~
+
+ # -- Configure the maximum number of service entries in the
+ # load balancer maps.
+ lbMapMax: 65536
+
+ # -- (int) Configure the maximum number of entries for the NAT table.
+ # @default -- `524288`
+ natMax: ~
+
+ # -- (int) Configure the maximum number of entries for the neighbor table.
+ # @default -- `524288`
+ neighMax: ~
+
+ # -- Configure the maximum number of entries in endpoint policy map (per endpoint).
+ policyMapMax: 16384
+
+ # -- (float64) Configure auto-sizing for all BPF maps based on available memory.
+ # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
+ # @default -- `0.0025`
+ mapDynamicSizeRatio: ~
+
+ # -- Configure the level of aggregation for monitor notifications.
+ # Valid options are none, low, medium, maximum.
+ monitorAggregation: medium
+
+ # -- Configure the typical time between monitor notifications for
+ # active connections.
+ monitorInterval: "5s"
+
+ # -- Configure which TCP flags trigger notifications when seen for the
+ # first time in a connection.
+ monitorFlags: "all"
+
+ # -- Allow cluster external access to ClusterIP services.
+ lbExternalClusterIP: false
+
+ # -- (bool) Enable native IP masquerade support in eBPF
+ # @default -- `false`
+ masquerade: ~
+
+ # -- (bool) Configure whether direct routing mode should route traffic via
+ # host stack (true) or directly and more efficiently out of BPF (false) if
+ # the kernel supports it. The latter has the implication that it will also
+ # bypass netfilter in the host namespace.
+ # @default -- `false`
+ hostLegacyRouting: ~
+
+ # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules
+ # for implementing Layer 7 policy.
+ # @default -- `false`
+ tproxy: ~
+
+ # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass.
+ # [0] will allow all VLAN id's without any filtering.
+ # @default -- `[]`
+ vlanBypass: ~
+
+ # -- Enable BPF clock source probing for more efficient tick retrieval.
+ bpfClockProbe: false
+
+ # -- Clean all eBPF datapath state from the initContainer of the cilium-agent
+ # DaemonSet.
+ #
+ # WARNING: Use with care!
+ cleanBpfState: false
+
+ # -- Clean all local Cilium state from the initContainer of the cilium-agent
+ # DaemonSet. Implies cleanBpfState: true.
+ #
+ # WARNING: Use with care!
+ cleanState: false
+
+ # -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
+ # init container before launching cilium-agent.
+ # More context can be found in the commit message of below PR
+ # https://github.com/cilium/cilium/pull/20123
+ waitForKubeProxy: false
+
+ cni:
+ # -- Install the CNI configuration and binary files into the filesystem.
+ install: true
+
+ # -- Remove the CNI configuration and binary files on agent shutdown. Enable this
+ # if you're removing Cilium from the cluster. Disable this to prevent the CNI
+ # configuration file from being removed during agent upgrade, which can cause
+ # nodes to go unmanageable.
+ uninstall: false
+
+ # -- Configure chaining on top of other CNI plugins. Possible values:
+ # - none
+ # - aws-cni
+ # - flannel
+ # - generic-veth
+ # - portmap
+ chainingMode: ~
+
+ # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin.
+ # This will cause the agent to watch for a CNI network with this network name. When it is
+ # found, this will be used as the basis for Cilium's CNI configuration file. If this is
+ # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
+ # of aws-cni implies a chainingTarget of aws-cni.
+ chainingTarget: ~
+
+ # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
+ # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+ # This ensures no Pods can be scheduled using other CNI plugins during Cilium
+ # agent downtime.
+ exclusive: false
+
+ # -- Configure the log file for CNI logging with retention policy of 7 days.
+ # Disable CNI file logging by setting this field to empty explicitly.
+ logFile: /var/run/cilium/cilium-cni.log
+
+ # -- Skip writing of the CNI configuration. This can be used if
+ # writing of the CNI configuration is performed by external automation.
+ customConf: false
+
+ # -- Configure the path to the CNI configuration directory on the host.
+ confPath: /etc/cni/net.d
+
+ # -- Configure the path to the CNI binary directory on the host.
+ binPath: /opt/cni/bin
+
+ # -- Specify the path to a CNI config to read from on agent start.
+ # This can be useful if you want to manage your CNI
+ # configuration outside of a Kubernetes environment. This parameter is
+ # mutually exclusive with the 'cni.configMap' parameter. The agent will
+ # write this to 05-cilium.conflist on startup.
+ # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input
+
+ # -- When defined, configMap will mount the provided value as ConfigMap and
+ # interpret the cniConf variable as CNI configuration file and write it
+ # when the agent starts up
+ # configMap: cni-configuration
+
+ # -- Configure the key in the CNI ConfigMap to read the contents of
+ # the CNI configuration from.
+ configMapKey: cni-config
+
+ # -- Configure the path to where to mount the ConfigMap inside the agent pod.
+ confFileMountPath: /tmp/cni-configuration
+
+ # -- Configure the path to where the CNI configuration directory is mounted
+ # inside the agent pod.
+ hostConfDirMountPath: /host/etc/cni/net.d
+
+ # -- Specifies the resources for the cni initContainer
+ resources:
+ requests:
+ cpu: 100m
+ memory: 10Mi
+
+ # -- (string) Configure how frequently garbage collection should occur for the datapath
+ # connection tracking table.
+ # @default -- `"0s"`
+ conntrackGCInterval: ""
+
+ # -- (string) Configure the maximum frequency for the garbage collection of the
+ # connection tracking table. Only affects the automatic computation for the frequency
+ # and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently
+ # clean up unused identities created from ToFQDN policies.
+ conntrackGCMaxInterval: ""
+
+ # -- Configure container runtime specific integration.
+ # Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15.
+ containerRuntime:
+ # -- Enables specific integrations for container runtimes.
+ # Supported values:
+ # - crio
+ # - none
+ integration: none
+
+ # -- (string) Configure timeout in which Cilium will exit if CRDs are not available
+ # @default -- `"5m"`
+ crdWaitTimeout: ""
+
+ # -- Tail call hooks for custom eBPF programs.
+ customCalls:
+ # -- Enable tail call hooks for custom eBPF programs.
+ enabled: false
+
+ daemon:
+ # -- Configure where Cilium runtime state should be stored.
+ runPath: "/var/run/cilium"
+
+ # -- Configure a custom list of possible configuration override sources
+ # The default is "config-map:cilium-config,cilium-node-config". For supported
+ # values, see the help text for the build-config subcommand.
+ # Note that this value should be a comma-separated string.
+ configSources: ~
+
+ # -- allowedConfigOverrides is a list of config-map keys that can be overridden.
+ # That is to say, if this value is set, config sources (excepting the first one) can
+ # only override keys in this list.
+ #
+ # This takes precedence over blockedConfigOverrides.
+ #
+ # By default, all keys may be overridden. To disable overrides, set this to "none" or
+ # change the configSources variable.
+ allowedConfigOverrides: ~
+
+ # -- blockedConfigOverrides is a list of config-map keys that may not be overridden.
+ # In other words, if any of these keys appear in a configuration source excepting the
+ # first one, they will be ignored
+ #
+ # This is ignored if allowedConfigOverrides is set.
+ #
+ # By default, all keys may be overridden.
+ blockedConfigOverrides: ~
+
+ # -- Specify which network interfaces can run the eBPF datapath. This means
+ # that a packet sent from a pod to a destination outside the cluster will be
+ # masqueraded (to an output device IPv4 address), if the output device runs the
+ # program. When not specified, probing will automatically detect devices that have
+ # a non-local route. This should be used only when autodetection is not suitable.
+ # devices: ""
+
+ # -- Enables experimental support for the detection of new and removed datapath
+ # devices. When devices change the eBPF datapath is reloaded and services updated.
+ # If "devices" is set then only those devices, or devices matching a wildcard will
+ # be considered.
+ enableRuntimeDeviceDetection: false
+
+ # -- Chains to ignore when installing feeder rules.
+ # disableIptablesFeederRules: ""
+
+ # -- Limit iptables-based egress masquerading to interface selector.
+ # egressMasqueradeInterfaces: ""
+
+ # -- Enable setting identity mark for local traffic.
+ # enableIdentityMark: true
+
+ # -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it.
+ # enableK8sEndpointSlice: true
+
+ # -- Enable CiliumEndpointSlice feature.
+ enableCiliumEndpointSlice: false
+
+ envoyConfig:
+ # -- Enable CiliumEnvoyConfig CRD
+ # CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
+ enabled: false
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for CiliumEnvoyConfig CRDs.
+ create: true
+
+ # -- The name of the secret namespace to which Cilium agents are given read access.
+ name: cilium-secrets
+
+ ingressController:
+ # -- Enable cilium ingress controller
+ # This will automatically set enable-envoy-config as well.
+ enabled: false
+
+ # -- Set cilium ingress controller to be the default ingress controller
+ # This will let cilium ingress controller route entries without ingress class set
+ default: false
+
+ # -- Default ingress load balancer mode
+ # Supported values: shared, dedicated
+ # For granular control, use the following annotations on the ingress resource
+ # ingress.cilium.io/loadbalancer-mode: shared|dedicated,
+ loadbalancerMode: dedicated
+
+ # -- Enforce https for host having matching TLS host in Ingress.
+ # Incoming traffic to http listener will return 308 http error code with respective location in header.
+ enforceHttps: true
+
+ # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled.
+ enableProxyProtocol: false
+
+ # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service
+ ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com']
+
+ # -- Default secret namespace for ingresses without .spec.tls[].secretName set.
+ defaultSecretNamespace:
+
+ # -- Default secret name for ingresses without .spec.tls[].secretName set.
+ defaultSecretName:
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for Ingress.
+ create: true
+
+ # -- Name of Ingress secret namespace.
+ name: cilium-secrets
+
+ # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
+ # If disabled, TLS secrets must be maintained externally.
+ sync: true
+
+ # -- Load-balancer service in shared mode.
+ # This is a single load-balancer service for all Ingress resources.
+ service:
+ # -- Service name
+ name: cilium-ingress
+ # -- Labels to be added for the shared LB service
+ labels: {}
+ # -- Annotations to be added for the shared LB service
+ annotations: {}
+ # -- Service type for the shared LB service
+ type: LoadBalancer
+ # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service
+ insecureNodePort: ~
+ # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service
+ secureNodePort : ~
+ # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+)
+ loadBalancerClass: ~
+ # -- Configure a specific loadBalancerIP on the shared LB service
+ loadBalancerIP : ~
+ # -- Configure if node port allocation is required for LB service
+ # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation
+ allocateLoadBalancerNodePorts: ~
+
+ gatewayAPI:
+ # -- Enable support for Gateway API in cilium
+ # This will automatically set enable-envoy-config as well.
+ enabled: false
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for Gateway API.
+ create: true
+
+ # -- Name of Gateway API secret namespace.
+ name: cilium-secrets
+
+ # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
+ # If disabled, TLS secrets must be maintained externally.
+ sync: true
+
+ # -- Enables the fallback compatibility solution for when the xt_socket kernel
+ # module is missing and it is needed for the datapath L7 redirection to work
+ # properly. See documentation for details on when this can be disabled:
+ # https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel.
+ enableXTSocketFallback: true
+
+ encryption:
+ # -- Enable transparent network encryption.
+ enabled: false
+
+ # -- Encryption method. Can be either ipsec or wireguard.
+ type: ipsec
+
+ # -- Enable encryption for pure node to node traffic.
+ # This option is only effective when encryption.type is set to "wireguard".
+ nodeEncryption: false
+
+ # -- Configure the WireGuard Pod2Pod strict mode.
+ strictMode:
+ # -- Enable WireGuard Pod2Pod strict mode.
+ enabled: false
+
+ # -- CIDR for the WireGuard Pod2Pod strict mode.
+ cidr: ""
+
+ # -- Allow dynamic lookup of remote node identities.
+ # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap.
+ allowRemoteNodeIdentities: false
+
+ ipsec:
+ # -- Name of the key file inside the Kubernetes secret configured via secretName.
+ keyFile: ""
+
+ # -- Path to mount the secret inside the Cilium pod.
+ mountPath: ""
+
+ # -- Name of the Kubernetes secret containing the encryption keys.
+ secretName: ""
+
+ # -- The interface to use for encrypted traffic.
+ interface: ""
+
+ # -- Enable the key watcher. If disabled, a restart of the agent will be
+ # necessary on key rotations.
+ keyWatcher: true
+
+ # -- Maximum duration of the IPsec key rotation. The previous key will be
+ # removed after that delay.
+ keyRotationDuration: "5m"
+
+ wireguard:
+ # -- Enables the fallback to the user-space implementation.
+ userspaceFallback: false
+ # -- Controls Wireguard PersistentKeepalive option. Set 0s to disable.
+ persistentKeepalive: 0s
+
+ # -- Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15.
+ # Name of the key file inside the Kubernetes secret configured via secretName.
+ # This option is only effective when encryption.type is set to ipsec.
+ keyFile: keys
+
+ # -- Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15.
+ # Path to mount the secret inside the Cilium pod.
+ # This option is only effective when encryption.type is set to ipsec.
+ mountPath: /etc/ipsec
+
+ # -- Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15.
+ # Name of the Kubernetes secret containing the encryption keys.
+ # This option is only effective when encryption.type is set to ipsec.
+ secretName: cilium-ipsec-keys
+
+ # -- Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15.
+ # The interface to use for encrypted traffic.
+ # This option is only effective when encryption.type is set to ipsec.
+ interface: ""
+
+ endpointHealthChecking:
+ # -- Enable connectivity health checking between virtual endpoints.
+ enabled: true
+
+ # -- Enable endpoint status.
+ # Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space.
+ endpointStatus:
+ enabled: false
+ status: ""
+
+ endpointRoutes:
+ # -- Enable use of per endpoint routes instead of routing via
+ # the cilium_host interface.
+ enabled: false
+
+ k8sNetworkPolicy:
+ # -- Enable support for K8s NetworkPolicy
+ enabled: true
+
+ eni:
+ # -- Enable Elastic Network Interface (ENI) integration.
+ enabled: false
+ # -- Update ENI Adapter limits from the EC2 API
+ updateEC2AdapterLimitViaAPI: true
+ # -- Release IPs not used from the ENI
+ awsReleaseExcessIPs: false
+ # -- Enable ENI prefix delegation
+ awsEnablePrefixDelegation: false
+ # -- EC2 API endpoint to use
+ ec2APIEndpoint: ""
+ # -- Tags to apply to the newly created ENIs
+ eniTags: {}
+ # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable.
+ # @default -- `"5m"`
+ gcInterval: ""
+ # -- Additional tags attached to ENIs created by Cilium.
+ # Dangling ENIs with this tag will be garbage collected
+ # @default -- `{"io.cilium/cilium-managed":"true,"io.cilium/cluster-name":""}`
+ gcTags: {}
+ # -- If using IAM role for Service Accounts will not try to
+ # inject identity values from cilium-aws kubernetes secret.
+ # Adds annotation to service account if managed by Helm.
+ # See https://github.com/aws/amazon-eks-pod-identity-webhook
+ iamRole: ""
+ # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
+ # Important note: This requires that each instance has an ENI with a matching subnet attached
+ # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+ # use the CNI configuration file settings (cni.customConf) instead.
+ subnetIDsFilter: []
+ # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
+ # Important note: This requires that each instance has an ENI with a matching subnet attached
+ # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+ # use the CNI configuration file settings (cni.customConf) instead.
+ subnetTagsFilter: []
+ # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
+ # are going to be used to create new ENIs
+ instanceTagsFilter: []
+
+ externalIPs:
+ # -- Enable ExternalIPs service support.
+ enabled: false
+
+ # fragmentTracking enables IPv4 fragment tracking support in the datapath.
+ # fragmentTracking: true
+
+ gke:
+ # -- Enable Google Kubernetes Engine integration
+ enabled: false
+
+ # -- Enable connectivity health checking.
+ healthChecking: true
+
+ # -- TCP port for the agent health API. This is not the port for cilium-health.
+ healthPort: 9879
+
+ # -- Configure the host firewall.
+ hostFirewall:
+ # -- Enables the enforcement of host policies in the eBPF datapath.
+ enabled: false
+
+ hostPort:
+ # -- Enable hostPort service support.
+ enabled: false
+
+ # -- Configure socket LB
+ socketLB:
+ # -- Enable socket LB
+ enabled: false
+
+ # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules.
+ # hostNamespaceOnly: false
+
+ # -- Configure certificate generation for Hubble integration.
+ # If hubble.tls.auto.method=cronJob, these values are used
+ # for the Kubernetes CronJob which will be scheduled regularly to
+ # (re)generate any certificates not provided manually.
+ certgen:
+ image:
+ override: ~
+ repository: "quay.io/cilium/certgen"
+ tag: "v0.1.9"
+ digest: "sha256:89a0847753686444daabde9474b48340993bd19c7bea66a46e45b2974b82041f"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- Seconds after which the completed job pod will be deleted
+ ttlSecondsAfterFinished: 1800
+ # -- Labels to be added to hubble-certgen pods
+ podLabels: {}
+ # -- Annotations to be added to the hubble-certgen initial Job and CronJob
+ annotations:
+ job: {}
+ cronJob: {}
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- Additional certgen volumes.
+ extraVolumes: []
+
+ # -- Additional certgen volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Affinity for certgen
+ affinity: {}
+
+ hubble:
+ # -- Enable Hubble (true by default).
+ enabled: true
+
+ # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble)
+ annotations: {}
+
+ # -- Buffer size of the channel Hubble uses to receive monitor events. If this
+ # value is not set, the queue size is set to the default monitor queue size.
+ # eventQueueSize: ""
+
+ # -- Number of recent flows for Hubble to cache. Defaults to 4095.
+ # Possible values are:
+ # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023,
+ # 2047, 4095, 8191, 16383, 32767, 65535
+ # eventBufferCapacity: "4095"
+
+ # -- Hubble metrics configuration.
+ # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics
+ # for more comprehensive documentation about Hubble metrics.
+ metrics:
+ # -- Configures the list of metrics to collect. If empty or null, metrics
+ # are disabled.
+ # Example:
+ #
+ # enabled:
+ # - dns:query;ignoreAAAA
+ # - drop
+ # - tcp
+ # - flow
+ # - icmp
+ # - http
+ #
+ # You can specify the list of metrics from the helm CLI:
+ #
+ # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
+ #
+ enabled: ~
+ # -- Enables exporting hubble metrics in OpenMetrics format.
+ enableOpenMetrics: false
+ # -- Configure the port the hubble metric server listens on.
+ port: 9965
+ # -- Annotations to be added to hubble-metrics service.
+ serviceAnnotations: {}
+ serviceMonitor:
+ # -- Create ServiceMonitor resources for Prometheus Operator.
+ # This requires the prometheus CRDs to be available.
+ # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor hubble
+ labels: {}
+ # -- Annotations to add to ServiceMonitor hubble
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor hubble
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor hubble
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor hubble
+ metricRelabelings: ~
+ # -- Grafana dashboards for hubble
+ # grafana can import dashboards based on the label and value
+ # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Unix domain socket path to listen to when Hubble is enabled.
+ socketPath: /var/run/cilium/hubble.sock
+
+ # -- Enables redacting sensitive information present in Layer 7 flows.
+ redact:
+ enabled: false
+ http:
+ # -- Enables redacting URL query (GET) parameters.
+ # Example:
+ #
+ # redact:
+ # enabled: true
+ # http:
+ # urlQuery: true
+ #
+ # You can specify the options from the helm CLI:
+ #
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.urlQuery="true"
+ urlQuery: false
+ # -- Enables redacting user info, e.g., password when basic auth is used.
+ # Example:
+ #
+ # redact:
+ # enabled: true
+ # http:
+ # userInfo: true
+ #
+ # You can specify the options from the helm CLI:
+ #
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.userInfo="true"
+ userInfo: true
+ headers:
+ # -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present.
+ # Example:
+ # redact:
+ # enabled: true
+ # http:
+ # headers:
+ # allow:
+ # - traceparent
+ # - tracestate
+ # - Cache-Control
+ #
+ # You can specify the options from the helm CLI:
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control"
+ allow: []
+ # -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present.
+ # Example:
+ # redact:
+ # enabled: true
+ # http:
+ # headers:
+ # deny:
+ # - Authorization
+ # - Proxy-Authorization
+ #
+ # You can specify the options from the helm CLI:
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization"
+ deny: []
+ kafka:
+ # -- Enables redacting Kafka's API key.
+ # Example:
+ #
+ # redact:
+ # enabled: true
+ # kafka:
+ # apiKey: true
+ #
+ # You can specify the options from the helm CLI:
+ #
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.kafka.apiKey="true"
+ apiKey: false
+
+ # -- An additional address for Hubble to listen to.
+ # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that
+ # Hubble is listening on port 4244.
+ listenAddress: ":4244"
+ # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available.
+ preferIpv6: false
+ # -- (bool) Skip Hubble events with unknown cgroup ids
+ # @default -- `true`
+ skipUnknownCGroupIDs: ~
+
+ peerService:
+ # -- Service Port for the Peer service.
+ # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ # port 80 if not.
+ # servicePort: 80
+ # -- Target Port for the Peer service, must match the hubble.listenAddress'
+ # port.
+ targetPort: 4244
+ # -- The cluster domain to use to query the Hubble Peer service. It should
+ # be the local cluster.
+ clusterDomain: cluster.local
+ # -- TLS configuration for Hubble
+ tls:
+ # -- Enable mutual TLS for listenAddress. Setting this value to false is
+ # highly discouraged as the Hubble API provides access to potentially
+ # sensitive network flow metadata and is exposed on the host network.
+ enabled: true
+ # -- Configure automatic TLS certificates generation.
+ auto:
+ # -- Auto-generate certificates.
+ # When set to true, automatically generate a CA and certificates to
+ # enable mTLS between Hubble server and Hubble Relay instances. If set to
+ # false, the certs for Hubble server need to be provided by setting
+ # appropriate values below.
+ enabled: true
+ # -- Set the method to auto-generate certificates. Supported values:
+ # - helm: This method uses Helm to generate all certificates.
+ # - cronJob: This method uses a Kubernetes CronJob the generate any
+ # certificates not provided by the user at installation
+ # time.
+ # - certmanager: This method use cert-manager to generate & rotate certificates.
+ method: helm
+ # -- Generated certificates validity duration in days.
+ certValidityDuration: 1095
+ # -- Schedule for certificates regeneration (regardless of their expiration date).
+ # Only used if method is "cronJob". If nil, then no recurring job will be created.
+ # Instead, only the one-shot job is deployed to generate the certificates at
+ # installation time.
+ #
+ # Defaults to midnight of the first day of every fourth month. For syntax, see
+ # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+ schedule: "0 0 1 */4 *"
+
+ # [Example]
+ # certManagerIssuerRef:
+ # group: cert-manager.io
+ # kind: ClusterIssuer
+ # name: ca-issuer
+ # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
+ certManagerIssuerRef: {}
+
+ # -- base64 encoded PEM values for the Hubble server certificate and private key
+ server:
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
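+ # For illustration only (the hostname and IP below are hypothetical, not defaults):
+ # additional SANs for the auto-generated Hubble server certificate could look like:
+ # extraDnsNames:
+ # - hubble.example.internal
+ # extraIpAddresses:
+ # - 192.0.2.10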
+
+ relay:
+ # -- Enable Hubble Relay (requires hubble.enabled=true)
+ enabled: false
+
+ # -- Roll out Hubble Relay pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- Hubble-relay container image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/hubble-relay"
+ tag: "v1.15.3"
+ # hubble-relay-digest
+ digest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+
+ # -- Specifies the resources for the hubble-relay pods
+ resources: {}
+
+ # -- Number of replicas run for the hubble-relay deployment.
+ replicas: 1
+
+ # -- Affinity for hubble-relay
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Pod topology spread constraints for hubble-relay
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- Additional hubble-relay environment variables.
+ extraEnv: []
+
+ # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay)
+ annotations: {}
+
+ # -- Annotations to be added to hubble-relay pods
+ podAnnotations: {}
+
+ # -- Labels to be added to hubble-relay pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- The priority class to use for hubble-relay
+ priorityClassName: ""
+
+ # -- Configure termination grace period for hubble relay Deployment.
+ terminationGracePeriodSeconds: 1
+
+ # -- hubble-relay update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- Additional hubble-relay volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-relay volumeMounts.
+ extraVolumeMounts: []
+
+ # -- hubble-relay pod security context
+ podSecurityContext:
+ fsGroup: 65532
+
+ # -- hubble-relay container security context
+ securityContext:
+ # readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 65532
+ runAsGroup: 65532
+ capabilities:
+ drop:
+ - ALL
+
+ # -- hubble-relay service configuration.
+ service:
+ # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort.
+ type: ClusterIP
+ # --- The port to use when the service type is set to NodePort.
+ nodePort: 31234
+
+ # -- Host to listen to. Specify an empty string to bind to all the interfaces.
+ listenHost: ""
+
+ # -- Port to listen to.
+ listenPort: "4245"
+
+ # -- TLS configuration for Hubble Relay
+ tls:
+ # -- base64 encoded PEM values for the hubble-relay client certificate and private key
+ # This keypair is presented to Hubble server instances for mTLS
+ # authentication and is required when hubble.tls.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the hubble-relay server certificate and private key
+ server:
+ # When set to true, enables TLS for the Hubble Relay server
+ # (i.e., for clients connecting to the Hubble Relay API).
+ enabled: false
+ # When set to true, enforces mutual TLS between the Hubble Relay server and its clients.
+ # False allows non-mutual TLS connections.
+ # This option has no effect when TLS is disabled.
+ mtls: false
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to the certificate when it is auto-generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to the certificate when it is auto-generated
+ extraIpAddresses: []
+ # DNS name used by the backend to connect to the relay
+ # This is a simple workaround as the relay certificates are currently hardcoded to
+ # *.hubble-relay.cilium.io
+ # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546
+ # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local
+ relayName: "ui.hubble-relay.cilium.io"
+
+ # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
+ dialTimeout: ~
+
+ # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
+ retryTimeout: ~
+
+ # -- Max number of flows that can be buffered for sorting before being sent to the
+ # client (per request) (e.g. 100).
+ sortBufferLenMax: ~
+
+ # -- When the per-request flows sort buffer is not full, a flow is drained every
+ # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
+ sortBufferDrainTimeout: ~
+
+ # -- Port to use for the k8s service backed by hubble-relay pods.
+ # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ # port 80 if not.
+ # servicePort: 80
+
+ # -- Enable prometheus metrics for hubble-relay on the configured port at
+ # /metrics
+ prometheus:
+ enabled: false
+ port: 9966
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor hubble-relay
+ labels: {}
+ # -- Annotations to add to ServiceMonitor hubble-relay
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor hubble-relay
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor hubble-relay
+ metricRelabelings: ~
+
+ gops:
+ # -- Enable gops for hubble-relay
+ enabled: true
+ # -- Configure gops listen port for hubble-relay
+ port: 9893
+
+ pprof:
+ # -- Enable pprof for hubble-relay
+ enabled: false
+ # -- Configure pprof listen address for hubble-relay
+ address: localhost
+ # -- Configure pprof listen port for hubble-relay
+ port: 6062
+
+ ui:
+ # -- Whether to enable the Hubble UI.
+ enabled: false
+
+ standalone:
+ # -- When true, allows installing only the Hubble UI, without checking dependencies.
+ # It is useful if a cluster already has Cilium and Hubble Relay installed and you just
+ # want the Hubble UI to be deployed.
+ # When installing via Helm, deploy the UI with `helm upgrade`; when using the Cilium CLI, run `cilium hubble enable --ui`.
+ enabled: false
+
+ tls:
+ # -- When deploying Hubble UI in standalone mode with TLS enabled for Hubble Relay,
+ # you must provide a volume for mounting the client certificates.
+ certsVolume: {}
+ # projected:
+ # defaultMode: 0400
+ # sources:
+ # - secret:
+ # name: hubble-ui-client-certs
+ # items:
+ # - key: tls.crt
+ # path: client.crt
+ # - key: tls.key
+ # path: client.key
+ # - key: ca.crt
+ # path: hubble-relay-ca.crt
+
+ # -- Roll out Hubble-ui pods automatically when configmap is updated.
+ rollOutPods: false
+
+ tls:
+ # -- base64 encoded PEM values used to connect to hubble-relay
+ # This keypair is presented to Hubble Relay instances for mTLS
+ # authentication and is required when hubble.relay.tls.server.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ cert: ""
+ key: ""
+
+ backend:
+ # -- Hubble-ui backend image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/hubble-ui-backend"
+ tag: "v0.13.0"
+ digest: "sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+
+ # -- Hubble-ui backend security context.
+ securityContext: {}
+
+ # -- Additional hubble-ui backend environment variables.
+ extraEnv: []
+
+ # -- Additional hubble-ui backend volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-ui backend volumeMounts.
+ extraVolumeMounts: []
+
+ livenessProbe:
+ # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
+ enabled: false
+
+ readinessProbe:
+ # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
+ enabled: false
+
+ # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment.
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+
+ frontend:
+ # -- Hubble-ui frontend image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/hubble-ui"
+ tag: "v0.13.0"
+ digest: "sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+
+ # -- Hubble-ui frontend security context.
+ securityContext: {}
+
+ # -- Additional hubble-ui frontend environment variables.
+ extraEnv: []
+
+ # -- Additional hubble-ui frontend volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-ui frontend volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment.
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ server:
+ # -- Controls server listener for ipv6
+ ipv6:
+ enabled: true
+
+ # -- The number of replicas of Hubble UI to deploy.
+ replicas: 1
+
+ # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui)
+ annotations: {}
+
+ # -- Annotations to be added to hubble-ui pods
+ podAnnotations: {}
+
+ # -- Labels to be added to hubble-ui pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- Affinity for hubble-ui
+ affinity: {}
+
+ # -- Pod topology spread constraints for hubble-ui
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- The priority class to use for hubble-ui
+ priorityClassName: ""
+
+ # -- hubble-ui update strategy.
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- Security context to be added to Hubble UI pods
+ securityContext:
+ runAsUser: 1001
+ runAsGroup: 1001
+ fsGroup: 1001
+
+ # -- hubble-ui service configuration.
+ service:
+ # -- Annotations to be added for the Hubble UI service
+ annotations: {}
+ # --- The type of service used for Hubble UI access, either ClusterIP or NodePort.
+ type: ClusterIP
+ # --- The port to use when the service type is set to NodePort.
+ nodePort: 31235
+
+ # -- Defines base url prefix for all hubble-ui http requests.
+ # It needs to be changed if the ingress for hubble-ui is configured under a sub-path.
+ # A trailing `/` is required for a custom path, e.g. `/service-map/`.
+ baseUrl: "/"
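+ # A minimal illustration, reusing the sub-path example above: if hubble-ui is
+ # served by an ingress under /service-map/, the value would be:
+ # baseUrl: "/service-map/"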
+
+ # -- hubble-ui ingress configuration.
+ ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ className: ""
+ hosts:
+ - chart-example.local
+ labels: {}
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+ # -- Hubble flows export.
+ export:
+ # --- Defines the max size (in MB) of the output file before it gets rotated.
+ fileMaxSizeMb: 10
+ # --- Defines max number of backup/rotated files.
+ fileMaxBackups: 5
+ # --- Static exporter configuration.
+ # Static exporter is bound to agent lifecycle.
+ static:
+ enabled: false
+ filePath: /var/run/cilium/hubble/events.log
+ fieldMask: []
+ # - time
+ # - source
+ # - destination
+ # - verdict
+ allowList: []
+ # - '{"verdict":["DROPPED","ERROR"]}'
+ denyList: []
+ # - '{"source_pod":["kube-system/"]}'
+ # - '{"destination_pod":["kube-system/"]}'
+ # --- Dynamic exporters configuration.
+ # Dynamic exporters may be reconfigured without a need of agent restarts.
+ dynamic:
+ enabled: false
+ config:
+ # ---- Name of the configmap with configuration that may be altered to reconfigure exporters within running agents.
+ configMapName: cilium-flowlog-config
+ # ---- True if the Helm installer should create the config map.
+ # Switch to false if you want to maintain the file content yourself.
+ createConfigMap: true
+ # ---- Exporters configuration in YAML format.
+ content:
+ - name: all
+ fieldMask: []
+ includeFilters: []
+ excludeFilters: []
+ filePath: "/var/run/cilium/hubble/events.log"
+ #- name: "test002"
+ # filePath: "/var/log/network/flow-log/pa/test002.log"
+ # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"]
+ # includeFilters:
+ # - source_pod: ["default/"]
+ # event_type:
+ # - type: 1
+ # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"]
+ # excludeFilters: []
+ # end: "2023-10-09T23:59:59-07:00"
+
+ # -- Method to use for identity allocation (`crd` or `kvstore`).
+ identityAllocationMode: "crd"
+
+ # -- (string) Time to wait before using new identity on endpoint identity change.
+ # @default -- `"5s"`
+ identityChangeGracePeriod: ""
+
+ # -- Install Iptables rules to skip netfilter connection tracking on all pod
+ # traffic. This option is only effective when Cilium is running in direct
+ # routing and full KPR mode. Moreover, this option cannot be enabled when Cilium
+ # is running in a managed Kubernetes environment or in a chained CNI setup.
+ installNoConntrackIptablesRules: false
+
+ ipam:
+ # -- Configure IP Address Management mode.
+ # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/
+ # For this pack, the default mode has been switched from "cluster-pool" to
+ # "kubernetes" so that Cilium respects the PodCIDR that is configured
+ # in the K8s pack.
+ mode: "kubernetes"
+ # The alternative below is the default for the Cilium helm chart
+ # mode: "cluster-pool"
+ # # -- Maximum rate at which the CiliumNode custom resource is updated.
+ # ciliumNodeUpdateRate: "15s"
+ # operator:
+ # # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
+ # clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"]
+ # # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
+ # clusterPoolIPv4MaskSize: 24
+ # # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
+ # clusterPoolIPv6PodCIDRList: ["fd00::/104"]
+ # # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
+ # clusterPoolIPv6MaskSize: 120
+ # # -- IP pools to auto-create in multi-pool IPAM mode.
+ # autoCreateCiliumPodIPPools: {}
+ # # default:
+ # # ipv4:
+ # # cidrs:
+ # # - 10.10.0.0/8
+ # # maskSize: 24
+ # # other:
+ # # ipv6:
+ # # cidrs:
+ # # - fd00:100::/80
+ # # maskSize: 96
+ # # -- The maximum burst size when rate limiting access to external APIs.
+ # # Also known as the token bucket capacity.
+ # # @default -- `20`
+ # externalAPILimitBurstSize: ~
+ # # -- The maximum queries per second when rate limiting access to
+ # # external APIs. Also known as the bucket refill rate, which is used to
+ # # refill the bucket up to the burst size capacity.
+ # # @default -- `4.0`
+ # externalAPILimitQPS: ~
+
+ # -- The api-rate-limit option can be used to override individual settings of the default rate-limiting configuration for calls to the Cilium Agent API.
+ apiRateLimit: ~
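+ # Hedged sketch only -- the exact key/value syntax comes from the upstream Cilium
+ # API rate limiting docs and should be verified for your Cilium version:
+ # apiRateLimit: "endpoint-create=rate-limit:10/s,rate-burst:10"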
+
+ # -- Configure the eBPF-based ip-masq-agent
+ ipMasqAgent:
+ enabled: false
+ # the config of nonMasqueradeCIDRs
+ # config:
+ # nonMasqueradeCIDRs: []
+ # masqLinkLocal: false
+ # masqLinkLocalIPv6: false
+
+ # iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium.
+ # iptablesLockTimeout: "5s"
+
+ ipv4:
+ # -- Enable IPv4 support.
+ enabled: true
+
+ ipv6:
+ # -- Enable IPv6 support.
+ enabled: false
+
+ # -- Configure Kubernetes specific configuration
+ k8s: {}
+ # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
+ # range via the Kubernetes node resource
+ # requireIPv4PodCIDR: false
+
+ # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
+ # range via the Kubernetes node resource
+ # requireIPv6PodCIDR: false
+
+ # -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
+ keepDeprecatedLabels: false
+
+ # -- Keep the deprecated probes when deploying Cilium DaemonSet
+ keepDeprecatedProbes: false
+
+ startupProbe:
+ # -- failure threshold of startup probe.
+ # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
+ failureThreshold: 105
+ # -- interval between checks of the startup probe
+ periodSeconds: 2
+ livenessProbe:
+ # -- failure threshold of liveness probe
+ failureThreshold: 10
+ # -- interval between checks of the liveness probe
+ periodSeconds: 30
+ readinessProbe:
+ # -- failure threshold of readiness probe
+ failureThreshold: 3
+ # -- interval between checks of the readiness probe
+ periodSeconds: 30
+
+ # -- Configure the kube-proxy replacement in Cilium BPF datapath
+ # Valid options are "true", "false", "disabled" (deprecated), "partial" (deprecated), "strict" (deprecated).
+ # ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/
+ #kubeProxyReplacement: "false"
+
+ # -- healthz server bind address for the kube-proxy replacement.
+ # To enable, set the value to '0.0.0.0:10256' for all IPv4
+ # addresses and to '[::]:10256' for all IPv6 addresses.
+ # By default it is disabled.
+ kubeProxyReplacementHealthzBindAddr: ""
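+ # For example (as described above), to expose the healthz server on all IPv4 addresses:
+ # kubeProxyReplacementHealthzBindAddr: "0.0.0.0:10256"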
+
+ l2NeighDiscovery:
+ # -- Enable L2 neighbor discovery in the agent
+ enabled: true
+ # -- Override the agent's default neighbor resolution refresh period.
+ refreshPeriod: "30s"
+
+ # -- Enable Layer 7 network policy.
+ l7Proxy: true
+
+ # -- Enable Local Redirect Policy.
+ localRedirectPolicy: false
+
+ # To include or exclude matched resources from cilium identity evaluation
+ # labels: ""
+
+ # logOptions allows you to define logging options. eg:
+ # logOptions:
+ # format: json
+
+ # -- Enables periodic logging of system load
+ logSystemLoad: false
+
+ # -- Configure maglev consistent hashing
+ maglev: {}
+ # -- tableSize is the size (parameter M) for the backend table of one
+ # service entry
+ # tableSize:
+
+ # -- hashSeed is the cluster-wide base64 encoded seed for the hashing
+ # hashSeed:
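+ # Hedged illustration only: per upstream Cilium guidance the table size should be
+ # a prime number (verify the supported values for your version), e.g.:
+ # maglev:
+ #   tableSize: 65521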
+
+ # -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
+ enableIPv4Masquerade: true
+
+ # -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+ enableIPv6Masquerade: true
+
+ # -- Enables masquerading to the source of the route for traffic leaving the node from endpoints.
+ enableMasqueradeRouteSource: false
+
+ # -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods
+ enableIPv4BIGTCP: false
+
+ # -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods
+ enableIPv6BIGTCP: false
+
+ egressGateway:
+ # -- Enables egress gateway to redirect and SNAT the traffic that leaves the
+ # cluster.
+ enabled: false
+ # -- Deprecated; no replacement is necessary.
+ installRoutes: false
+ # -- Time between triggers of egress gateway state reconciliations
+ reconciliationTriggerInterval: 1s
+ # -- Maximum number of entries in egress gateway policy map
+ # maxPolicyEntries: 16384
+
+ vtep:
+ # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
+ # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
+ enabled: false
+
+ # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
+ endpoint: ""
+ # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
+ cidr: ""
+ # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
+ mask: ""
+ # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y"
+ mac: ""
+
+ # -- (string) Allows to explicitly specify the IPv4 CIDR for native routing.
+ # When specified, Cilium assumes networking for this CIDR is preconfigured and
+ # hands traffic destined for that range to the Linux network stack without
+ # applying any SNAT.
+ # Generally speaking, specifying a native routing CIDR implies that Cilium can
+ # depend on the underlying networking stack to route packets to their
+ # destination. To offer a concrete example, if Cilium is configured to use
+ # direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+ # the user must configure the routes to reach pods, either manually or by
+ # setting the auto-direct-node-routes flag.
+ ipv4NativeRoutingCIDR: ""
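+ # Illustrative value only (reusing the example CIDR from the IPAM section above);
+ # it must cover the PodCIDR in use in your environment:
+ # ipv4NativeRoutingCIDR: "10.0.0.0/8"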
+
+ # -- (string) Allows to explicitly specify the IPv6 CIDR for native routing.
+ # When specified, Cilium assumes networking for this CIDR is preconfigured and
+ # hands traffic destined for that range to the Linux network stack without
+ # applying any SNAT.
+ # Generally speaking, specifying a native routing CIDR implies that Cilium can
+ # depend on the underlying networking stack to route packets to their
+ # destination. To offer a concrete example, if Cilium is configured to use
+ # direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+ # the user must configure the routes to reach pods, either manually or by
+ # setting the auto-direct-node-routes flag.
+ ipv6NativeRoutingCIDR: ""
+
+ # -- cilium-monitor sidecar.
+ monitor:
+ # -- Enable the cilium-monitor sidecar.
+ enabled: false
+
+ # -- Configure service load balancing
+ loadBalancer:
+ # -- standalone enables the standalone L4LB which does not connect to
+ # kube-apiserver.
+ # standalone: false
+
+ # -- algorithm is the name of the load balancing algorithm for backend
+ # selection e.g. random or maglev
+ # algorithm: random
+
+ # -- mode is the operation mode of load balancing for remote backends
+ # e.g. snat, dsr, hybrid
+ # mode: snat
+
+ # -- acceleration is the option to accelerate service handling via XDP
+ # Applicable values can be: disabled (do not use XDP), native (XDP BPF
+ # program is run directly out of the networking driver's early receive
+ # path), or best-effort (use native mode XDP acceleration on devices
+ # that support it).
+ acceleration: disabled
+
+ # -- dsrDispatch configures whether IP option or IPIP encapsulation is
+ # used to pass a service IP and port to remote backend
+ # dsrDispatch: opt
+
+ # -- serviceTopology enables K8s Topology Aware Hints -based service
+ # endpoints filtering
+ # serviceTopology: false
+
+ # -- L7 LoadBalancer
+ l7:
+ # -- Enable L7 service load balancing via envoy proxy.
+ # Requests to a k8s service that carries the specific annotation, e.g. service.cilium.io/lb-l7,
+ # will be forwarded to the local backend proxy to be load balanced to the service endpoints.
+ # Please refer to the docs for the supported annotations and further configuration.
+ #
+ # Applicable values:
+ # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well.
+ # - disabled: Disable L7 load balancing by way of service annotation.
+ backend: disabled
+ # -- List of ports from service to be automatically redirected to above backend.
+ # Any service exposing one of these ports will be automatically redirected.
+ # Fine-grained control can be achieved by using the service annotation.
+ ports: []
+ # -- Default LB algorithm
+ # The default LB algorithm to be used for services, which can be overridden by the
+ # service annotation (e.g. service.cilium.io/lb-l7-algorithm)
+ # Applicable values: round_robin, least_request, random
+ algorithm: round_robin
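+ # A hedged example (ports are illustrative) that enables L7 load balancing with
+ # the envoy backend described above:
+ # l7:
+ #   backend: envoy
+ #   ports: [80, 8080]
+ #   algorithm: least_request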
+
+ # -- Configure N-S k8s service loadbalancing
+ nodePort:
+ # -- Enable the Cilium NodePort service implementation.
+ enabled: false
+
+ # -- Port range to use for NodePort services.
+ # range: "30000,32767"
+
+ # -- Set to true to prevent applications binding to service ports.
+ bindProtection: true
+
+ # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral
+ # ports is detected.
+ autoProtectPortRange: true
+
+ # -- Enable healthcheck nodePort server for NodePort services
+ enableHealthCheck: true
+
+ # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs
+ # EnableHealthCheck to be enabled
+ enableHealthCheckLoadBalancerIP: false
+
+ # policyAuditMode: false
+
+ # -- The agent can be put into one of three policy enforcement modes:
+ # default, always, and never.
+ # ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes
+ policyEnforcementMode: "default"
+
+ # -- policyCIDRMatchMode is a list of entities that may be selected by a CIDR selector.
+ # Currently, the only possible value is "nodes".
+ policyCIDRMatchMode:
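+ # For example, to allow nodes to be selected by CIDR selectors:
+ # policyCIDRMatchMode: ["nodes"]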
+
+ pprof:
+ # -- Enable pprof for cilium-agent
+ enabled: false
+ # -- Configure pprof listen address for cilium-agent
+ address: localhost
+ # -- Configure pprof listen port for cilium-agent
+ port: 6060
+
+ # -- Configure prometheus metrics on the configured port at /metrics
+ prometheus:
+ enabled: false
+ port: 9962
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-agent
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-agent
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor cilium-agent
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-agent
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-agent
+ metricRelabelings: ~
+ # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying
+ trustCRDsExist: false
+
+ # -- Metrics that should be enabled or disabled from the default metric list.
+ # The list is expected to be separated by a space. (+metric_foo to enable
+ # metric_foo , -metric_bar to disable metric_bar).
+ # ref: https://docs.cilium.io/en/stable/observability/metrics/
+ metrics: ~
+
+ # --- Enable controller group metrics for monitoring specific Cilium
+ # subsystems. The value is a list of controller group names. The special
+ # values "all" and "none" are supported. The set of controller
+ # group names is not guaranteed to be stable between Cilium versions.
+ controllerGroupMetrics:
+ - write-cni-file
+ - sync-host-ips
+ - sync-lb-maps-with-k8s-services
+
+ # -- Grafana dashboards for cilium-agent
+ # grafana can import dashboards based on the label and value
+ # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Configure Istio proxy options.
+ proxy:
+
+ prometheus:
+ # -- Deprecated in favor of envoy.prometheus.enabled
+ enabled: true
+ # -- Deprecated in favor of envoy.prometheus.port
+ port: ~
+ # -- Regular expression matching compatible Istio sidecar istio-proxy
+ # container image names
+ sidecarImageRegex: "cilium/istio_proxy"
+
+ # Configure Cilium Envoy options.
+ envoy:
+ # -- Enable Envoy Proxy in standalone DaemonSet.
+ enabled: false
+
+ log:
+ # -- The format string to use for laying out the log message metadata of Envoy.
+ format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"
+ # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout.
+ path: ""
+
+ # -- Time in seconds after which a TCP connection attempt times out
+ connectTimeoutSeconds: 2
+ # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy
+ maxRequestsPerConnection: 0
+ # -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable)
+ maxConnectionDurationSeconds: 0
+ # -- Set Envoy upstream HTTP idle connection timeout seconds.
+ # Does not apply to connections with pending requests. Default 60s
+ idleTimeoutDurationSeconds: 60
+
+ # -- Envoy container image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium-envoy"
+ tag: "v1.27.3-99c1c8f42c8de70fc8f6dd594f4a425cd38b6688"
+ pullPolicy: "IfNotPresent"
+ digest: "sha256:877ead12d08d4c04a9f67f86d3c6e542aeb7bf97e1e401aee74de456f496ac30"
+ useDigest: true
+
+ # -- Additional containers added to the cilium Envoy DaemonSet.
+ extraContainers: []
+
+ # -- Additional envoy container arguments.
+ extraArgs: []
+
+ # -- Additional envoy container environment variables.
+ extraEnv: []
+
+ # -- Additional envoy hostPath mounts.
+ extraHostPathMounts: []
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
+
+ # -- Additional envoy volumes.
+ extraVolumes: []
+
+ # -- Additional envoy volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Configure termination grace period for cilium-envoy DaemonSet.
+ terminationGracePeriodSeconds: 1
+
+ # -- TCP port for the health API.
+ healthPort: 9878
+
+ # -- cilium-envoy update strategy
+ # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 2
+ # -- Roll out cilium envoy pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy)
+ annotations: {}
+
+ # -- Security Context for cilium-envoy pods.
+ podSecurityContext: {}
+
+ # -- Annotations to be added to envoy pods
+ podAnnotations: {}
+
+ # -- Labels to be added to envoy pods
+ podLabels: {}
+
+ # -- Envoy resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ startupProbe:
+ # -- failure threshold of startup probe.
+ # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
+ failureThreshold: 105
+ # -- interval between checks of the startup probe
+ periodSeconds: 2
+ livenessProbe:
+ # -- failure threshold of liveness probe
+ failureThreshold: 10
+ # -- interval between checks of the liveness probe
+ periodSeconds: 30
+ readinessProbe:
+ # -- failure threshold of readiness probe
+ failureThreshold: 3
+ # -- interval between checks of the readiness probe
+ periodSeconds: 30
+
+ securityContext:
+ # -- User to run the pod with
+ # runAsUser: 0
+ # -- Run the pod with elevated privileges
+ privileged: false
+ # -- SELinux options for the `cilium-envoy` container
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ # -- Capabilities for the `cilium-envoy` container
+ envoy:
+ # Needed because the Cilium proxy sets the IPPROTO_IP/IP_TRANSPARENT socket option
+ - NET_ADMIN
+ # We need it for now, but it might not be required for kernels >= 5.11, especially
+ # for the 'SYS_RESOURCE'.
+ # On kernels >= 5.8 the BPF and PERFMON capabilities are already available.
+ - SYS_ADMIN
+ # Both PERFMON and BPF require kernel >= 5.8, and container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+
+ # -- Affinity for cilium-envoy.
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium-envoy
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: cilium.io/no-schedule
+ operator: NotIn
+ values:
+ - "true"
+ # -- Node selector for cilium-envoy.
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for envoy scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- The priority class to use for cilium-envoy.
+ priorityClassName: ~
+
+ # -- DNS policy for Cilium envoy pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ~
+
+ # -- Configure Cilium Envoy Prometheus options.
+ # Note that some of these apply to either cilium-agent or cilium-envoy.
+ prometheus:
+ # -- Enable prometheus metrics for cilium-envoy
+ enabled: true
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ # Note that this setting applies to both cilium-envoy _and_ cilium-agent
+ # with Envoy enabled.
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-envoy
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-envoy
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-envoy
+ # or for cilium-agent with Envoy configured.
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
+ # or for cilium-agent with Envoy configured.
+ metricRelabelings: ~
+ # -- Serve prometheus metrics for cilium-envoy on the configured port
+ port: "9964"
+
+ # -- Enable use of the remote node identity.
+ # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
+ # Deprecated without replacement in 1.15. To be removed in 1.16.
+ remoteNodeIdentity: true
+
+ # -- Enable resource quotas for priority classes used in the cluster.
+ resourceQuotas:
+ enabled: false
+ cilium:
+ hard:
+ # 5k nodes * 2 DaemonSets (Cilium and cilium node init)
+ pods: "10k"
+ operator:
+ hard:
+ # 15 "clusterwide" Cilium Operator pods for HA
+ pods: "15"
+
+ # Need to document default
+ ##################
+ #sessionAffinity: false
+
+ # -- Do not run the Cilium agent when running in clean mode. Useful to completely
+ # uninstall Cilium, as it stops Cilium from starting and creating artifacts
+ # on the node.
+ sleepAfterInit: false
+
+ # -- Enable check of service source ranges (currently, only for LoadBalancer).
+ svcSourceRangeCheck: true
+
+ # -- Synchronize Kubernetes nodes to kvstore and perform CNP GC.
+ synchronizeK8sNodes: true
+
+ # -- Configure TLS configuration in the agent.
+ tls:
+ # -- This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies
+ # (namely the secrets referenced by terminatingTLS and originatingTLS).
+ # Possible values:
+ # - local
+ # - k8s
+ secretsBackend: local
+
+ # -- Base64 encoded PEM values for the CA certificate and private key.
+ # This can be used as common CA to generate certificates used by hubble and clustermesh components.
+ # It is neither required nor used when cert-manager is used to generate the certificates.
+ ca:
+ # -- Optional CA cert. If it is provided, it will be used by cilium to
+ # generate all other certificates. Otherwise, an ephemeral CA is generated.
+ cert: ""
+
+ # -- Optional CA private key. If it is provided, it will be used by cilium to
+ # generate all other certificates. Otherwise, an ephemeral CA is generated.
+ key: ""
+
+ # -- Generated certificates validity duration in days. This will be used for auto generated CA.
+ certValidityDuration: 1095
+
+ # -- Configure the CA trust bundle used for the validation of the certificates
+ # leveraged by hubble and clustermesh. When enabled, it overrides the content of the
+ # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time.
+ caBundle:
+ # -- Enable the use of the CA trust bundle.
+ enabled: false
+
+ # -- Name of the ConfigMap containing the CA trust bundle.
+ name: cilium-root-ca.crt
+
+ # -- Entry of the ConfigMap containing the CA trust bundle.
+ key: ca.crt
+
+ # -- Use a Secret instead of a ConfigMap.
+ useSecret: false
+
+ # If uncommented, creates the ConfigMap and fills it with the specified content.
+ # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace.
+ #
+ # content: |
+ # -----BEGIN CERTIFICATE-----
+ # ...
+ # -----END CERTIFICATE-----
+ # -----BEGIN CERTIFICATE-----
+ # ...
+ # -----END CERTIFICATE-----
+
+ # -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels.
+ # Possible values:
+ # - ""
+ # - vxlan
+ # - geneve
+ # @default -- `"vxlan"`
+ tunnelProtocol: ""
+
+ # -- Enable native-routing mode or tunneling mode.
+ # Possible values:
+ # - ""
+ # - native
+ # - tunnel
+ # @default -- `"tunnel"`
+ routingMode: ""
+
+ # -- Configure VXLAN and Geneve tunnel port.
+ # @default -- Port 8472 for VXLAN, Port 6081 for Geneve
+ tunnelPort: 0
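+ # A hedged example of explicitly selecting Geneve tunneling with its default port,
+ # using only the values documented above:
+ # routingMode: "tunnel"
+ # tunnelProtocol: "geneve"
+ # tunnelPort: 6081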
+
+ # -- Configure what the response should be to traffic for a service without backends.
+ # "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop".
+ # Possible values:
+ # - reject (default)
+ # - drop
+ serviceNoBackendResponse: reject
+
+ # -- Configure the underlying network MTU to overwrite auto-detected MTU.
+ MTU: 0
+
+ # -- Disable the usage of CiliumEndpoint CRD.
+ disableEndpointCRD: false
+
+ wellKnownIdentities:
+ # -- Enable the use of well-known identities.
+ enabled: false
+
+ etcd:
+ # -- Enable etcd mode for the agent.
+ enabled: false
+
+ # -- cilium-etcd-operator image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium-etcd-operator"
+ tag: "v2.0.7"
+ digest: "sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+
+ # -- The priority class to use for cilium-etcd-operator
+ priorityClassName: ""
+
+ # -- Additional cilium-etcd-operator container arguments.
+ extraArgs: []
+
+ # -- Additional cilium-etcd-operator volumes.
+ extraVolumes: []
+
+ # -- Additional cilium-etcd-operator volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Pod topology spread constraints for cilium-etcd-operator
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for cilium-etcd-operator pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator)
+ annotations: {}
+
+ # -- Security context to be added to cilium-etcd-operator pods
+ podSecurityContext: {}
+
+ # -- Annotations to be added to cilium-etcd-operator pods
+ podAnnotations: {}
+
+ # -- Labels to be added to cilium-etcd-operator pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- cilium-etcd-operator resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ # -- Security context to be added to cilium-etcd-operator pods
+ securityContext: {}
+ # runAsUser: 0
+
+ # -- cilium-etcd-operator update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+
+ # -- If etcd is behind a k8s service, set this option to true so that Cilium
+ # performs the service translation automatically without requiring DNS to be
+ # running.
+ k8sService: false
+
+ # -- Cluster domain for cilium-etcd-operator.
+ clusterDomain: cluster.local
+
+ # -- List of etcd endpoints (not needed when using managed=true).
+ endpoints:
+ - https://CHANGE-ME:2379
+
+ # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if
+ # managed=true)
+ ssl: false
+
+ operator:
+ # -- Enable the cilium-operator component (required).
+ enabled: true
+
+ # -- Roll out cilium-operator pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- cilium-operator image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/operator"
+ tag: "v1.15.3"
+ # operator-generic-digest
+ genericDigest: ""
+ # operator-azure-digest
+ azureDigest: ""
+ # operator-aws-digest
+ awsDigest: ""
+ # operator-alibabacloud-digest
+ alibabacloudDigest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+ suffix: ""
+
+ # -- Number of replicas to run for the cilium-operator deployment
+ replicas: 2
+
+ # -- The priority class to use for cilium-operator
+ priorityClassName: ""
+
+ # -- DNS policy for Cilium operator pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ""
+
+ # -- cilium-operator update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 25%
+ maxUnavailable: 50%
+
+ # -- Affinity for cilium-operator
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ io.cilium/app: operator
+
+ # -- Pod topology spread constraints for cilium-operator
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for cilium-operator pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for cilium-operator scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Additional cilium-operator container arguments.
+ extraArgs: []
+
+ # -- Additional cilium-operator environment variables.
+ extraEnv: []
+
+ # -- Additional cilium-operator hostPath mounts.
+ extraHostPathMounts: []
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
+
+ # -- Additional cilium-operator volumes.
+ extraVolumes: []
+
+ # -- Additional cilium-operator volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator)
+ annotations: {}
+
+ # -- Security context to be added to cilium-operator pods
+ podSecurityContext: {}
+
+ # -- Annotations to be added to cilium-operator pods
+ podAnnotations: {}
+
+ # -- Labels to be added to cilium-operator pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- cilium-operator resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1Gi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Security context to be added to cilium-operator pods
+ securityContext: {}
+ # runAsUser: 0
+
+ # -- Interval for endpoint garbage collection.
+ endpointGCInterval: "5m0s"
+
+ # -- Interval for cilium node garbage collection.
+ nodeGCInterval: "5m0s"
+
+ # -- Skip CNP node status clean up at operator startup.
+ skipCNPStatusStartupClean: false
+
+ # -- Interval for identity garbage collection.
+ identityGCInterval: "15m0s"
+
+ # -- Timeout for identity heartbeats.
+ identityHeartbeatTimeout: "30m0s"
+
+ pprof:
+ # -- Enable pprof for cilium-operator
+ enabled: false
+ # -- Configure pprof listen address for cilium-operator
+ address: localhost
+ # -- Configure pprof listen port for cilium-operator
+ port: 6061
+
+ # -- Enable prometheus metrics for cilium-operator on the configured port at
+ # /metrics
+ prometheus:
+ enabled: true
+ port: 9963
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-operator
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-operator
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor cilium-operator
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor cilium-operator
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-operator
+ metricRelabelings: ~
+
+ # -- Grafana dashboards for cilium-operator
+ # grafana can import dashboards based on the label and value
+ # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Skip CRDs creation for cilium-operator
+ skipCRDCreation: false
+
+ # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium
+ # pod running.
+ removeNodeTaints: true
+
+ # -- Taint nodes where Cilium is scheduled but not running. This prevents pods
+ # from being scheduled to nodes where Cilium is not the default CNI provider.
+ # @default -- same as removeNodeTaints
+ setNodeTaints: ~
+
+ # -- Set Node condition NetworkUnavailable to 'false' with the reason
+ # 'CiliumIsUp' for nodes that have a healthy Cilium pod.
+ setNodeNetworkStatus: true
+
+ unmanagedPodWatcher:
+ # -- Restart any pods that are not managed by Cilium.
+ restart: true
+ # -- Interval, in seconds, to check if there are any pods that are not
+ # managed by Cilium.
+ intervalSeconds: 15
+
+ nodeinit:
+ # -- Enable the node initialization DaemonSet
+ enabled: false
+
+ # -- node-init image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/startup-script"
+ tag: "62093c5c233ea914bfa26a10ba41f8780d9b737f"
+ pullPolicy: "IfNotPresent"
+
+ # -- The priority class to use for the nodeinit pod.
+ priorityClassName: ""
+
+ # -- node-init update strategy
+ updateStrategy:
+ type: RollingUpdate
+
+ # -- Additional nodeinit environment variables.
+ extraEnv: []
+
+ # -- Additional nodeinit volumes.
+ extraVolumes: []
+
+ # -- Additional nodeinit volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Affinity for cilium-nodeinit
+ affinity: {}
+
+ # -- Node labels for nodeinit pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for nodeinit scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit)
+ annotations: {}
+
+ # -- Annotations to be added to node-init pods.
+ podAnnotations: {}
+
+ # -- Labels to be added to node-init pods.
+ podLabels: {}
+
+ # -- nodeinit resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+
+ # -- Security context to be added to nodeinit pods.
+ securityContext:
+ privileged: false
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ add:
+ # Used in iptables. Consider removing once we are iptables-free
+ - SYS_MODULE
+ # Used for nsenter
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_CHROOT
+ - SYS_PTRACE
+
+ # -- bootstrapFile is the location of the file where the bootstrap timestamp is
+ # written by the node-init DaemonSet
+ bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"
+
+ # -- startup offers a way to customize the nodeinit startup script (pre and post positions)
+ startup:
+ preScript: ""
+ postScript: ""
+ # -- prestop offers a way to customize the nodeinit prestop script (pre and post positions); see the commented example below
+ prestop:
+ preScript: ""
+ postScript: ""
+
+ preflight:
+ # -- Enable Cilium pre-flight resources (required for upgrade)
+ enabled: false
+
+ # -- Cilium pre-flight image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium"
+ tag: "v1.15.3"
+ # cilium-digest
+ digest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+
+ # -- The priority class to use for the preflight pod.
+ priorityClassName: ""
+
+ # -- preflight update strategy
+ updateStrategy:
+ type: RollingUpdate
+
+ # -- Additional preflight environment variables.
+ extraEnv: []
+
+ # -- Additional preflight volumes.
+ extraVolumes: []
+
+ # -- Additional preflight volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Affinity for cilium-preflight
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Node labels for preflight pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for preflight scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight)
+ annotations: {}
+
+ # -- Security context to be added to preflight pods.
+ podSecurityContext: {}
+
+ # -- Annotations to be added to preflight pods
+ podAnnotations: {}
+
+ # -- Labels to be added to the preflight pod.
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- preflight resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ # -- Security context to be added to preflight pods
+ securityContext: {}
+ # runAsUser: 0
+
+ # -- Path to write the `--tofqdns-pre-cache` file to.
+ tofqdnsPreCache: ""
+
+ # -- Configure termination grace period for preflight Deployment and DaemonSet.
+ terminationGracePeriodSeconds: 1
+
+ # -- By default, always validate the installed CNPs before upgrading
+ # Cilium. This makes sure the policies deployed in the
+ # cluster use the correct schema.
+ validateCNPs: true
+
+ # -- Explicitly enable or disable the priority class.
+ # .Capabilities.KubeVersion is unsettable in `helm template` calls;
+ # it depends on the k8s library version that Helm was compiled against.
+ # This option allows you to explicitly disable setting the priority class, which
+ # is useful for rendering charts for GKE clusters in advance.
+ enableCriticalPriorityClass: true
+
+ # disableEnvoyVersionCheck removes the check for Envoy, which can be useful
+ # on AArch64 as the images do not currently ship a version of Envoy.
+ #disableEnvoyVersionCheck: false
+
+ clustermesh:
+ # -- Deploy clustermesh-apiserver for clustermesh
+ useAPIServer: false
+ # -- The maximum number of clusters to support in a ClusterMesh. This value
+ # cannot be changed on running clusters, and all clusters in a ClusterMesh
+ # must be configured with the same value. Values > 255 will decrease the
+ # maximum allocatable cluster-local identities.
+ # Supported values are 255 and 511.
+ maxConnectedClusters: 255
+
+ # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config)
+ annotations: {}
+
+ # -- Clustermesh explicit configuration.
+ config:
+ # -- Enable the Clustermesh explicit configuration.
+ enabled: false
+ # -- Default DNS domain for the Clustermesh API servers.
+ # This is used when cluster addresses are not provided
+ # and IPs are used instead.
+ domain: mesh.cilium.io
+ # -- List of clusters to be peered in the mesh.
+ clusters: []
+ # clusters:
+ # # -- Name of the cluster
+ # - name: cluster1
+ # # -- Address of the cluster, use this if you created DNS records for
+ # # the cluster Clustermesh API server.
+ # address: cluster1.mesh.cilium.io
+ # # -- Port of the cluster Clustermesh API server.
+ # port: 2379
+ # # -- IPs of the cluster Clustermesh API server, use multiple ones when
+ # # you have multiple IPs to access the Clustermesh API server.
+ # ips:
+ # - 172.18.255.201
+ # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority.
+ # # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the
+ # # "remote" private key and certificate available in the local cluster are automatically used instead.
+ # tls:
+ # cert: ""
+ # key: ""
+ # caCert: ""
+
+ apiserver:
+ # -- Clustermesh API server image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/clustermesh-apiserver"
+ tag: "v1.15.3"
+ # clustermesh-apiserver-digest
+ digest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+
+ etcd:
+ # The etcd binary is included in the clustermesh API server image, so the same image from above is reused.
+ # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is
+ # built with.
+
+ # -- Specifies the resources for etcd container in the apiserver
+ resources: {}
+ # requests:
+ # cpu: 200m
+ # memory: 256Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 256Mi
+
+ # -- Security context to be added to clustermesh-apiserver etcd containers
+ securityContext: {}
+
+ # -- lifecycle setting for the etcd container
+ lifecycle: {}
+
+ init:
+ # -- Specifies the resources for etcd init container in the apiserver
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 100Mi
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+
+ # -- Additional arguments to `clustermesh-apiserver etcdinit`.
+ extraArgs: []
+
+ # -- Additional environment variables to `clustermesh-apiserver etcdinit`.
+ extraEnv: []
+
+ kvstoremesh:
+ # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved
+ # from the remote clusters in the local etcd instance.
+ enabled: false
+
+ # -- Additional KVStoreMesh arguments.
+ extraArgs: []
+
+ # -- Additional KVStoreMesh environment variables.
+ extraEnv: []
+
+ # -- Resource requests and limits for the KVStoreMesh container
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+
+ # -- Additional KVStoreMesh volumeMounts.
+ extraVolumeMounts: []
+
+ # -- KVStoreMesh Security context
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+
+ # -- lifecycle setting for the KVStoreMesh container
+ lifecycle: {}
+
+ service:
+ # -- The type of service used for apiserver access.
+ type: NodePort
+ # -- Optional port to use as the node port for apiserver access.
+ #
+ # WARNING: make sure to configure a different NodePort in each cluster if
+ # kube-proxy replacement is enabled, as Cilium is currently affected by a known
+ # bug (#24692) when NodePorts are handled by the KPR implementation. If a service
+ # with the same NodePort exists both in the local and the remote cluster, all
+ # traffic originating from inside the cluster and targeting the corresponding
+ # NodePort will be redirected to a local backend, regardless of whether the
+ # destination node belongs to the local or the remote cluster.
+ nodePort: 32379
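+ # Illustrative sketch (hypothetical values): with kube-proxy replacement enabled, give
+ # each cluster its own NodePort to avoid the issue described above, e.g. 32379 for
+ # cluster1 and 32380 for cluster2.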
+ # -- Optional loadBalancer IP address to use with type LoadBalancer.
+ # loadBalancerIP:
+
+ # -- Annotations for the clustermesh-apiserver
+ # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal"
+ # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+ annotations: {}
+
+ # -- The externalTrafficPolicy of service used for apiserver access.
+ externalTrafficPolicy:
+
+ # -- The internalTrafficPolicy of service used for apiserver access.
+ internalTrafficPolicy:
+
+ # -- Number of replicas run for the clustermesh-apiserver deployment.
+ replicas: 1
+
+ # -- lifecycle setting for the apiserver container
+ lifecycle: {}
+
+ # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment
+ terminationGracePeriodSeconds: 30
+
+ # -- Additional clustermesh-apiserver arguments.
+ extraArgs: []
+
+ # -- Additional clustermesh-apiserver environment variables.
+ extraEnv: []
+
+ # -- Additional clustermesh-apiserver volumes.
+ extraVolumes: []
+
+ # -- Additional clustermesh-apiserver volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Security context to be added to clustermesh-apiserver containers
+ securityContext: {}
+
+ # -- Security context to be added to clustermesh-apiserver pods
+ podSecurityContext: {}
+
+ # -- Annotations to be added to clustermesh-apiserver pods
+ podAnnotations: {}
+
+ # -- Labels to be added to clustermesh-apiserver pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When set, maxUnavailable must be disabled by setting `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as
+ # resources:
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # -- Resource requests and limits for the clustermesh-apiserver
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+
+ # -- Affinity for clustermesh.apiserver
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: clustermesh-apiserver
+
+ # -- Pod topology spread constraints for clustermesh-apiserver
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- clustermesh-apiserver update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- The priority class to use for clustermesh-apiserver
+ priorityClassName: ""
+
+ tls:
+ # -- Configure the clustermesh authentication mode.
+ # Supported values:
+ # - legacy: All clusters access remote clustermesh instances with the same
+ # username (i.e., remote). The "remote" certificate must be
+ # generated with CN=remote if provided manually.
+ # - migration: Intermediate mode required to upgrade from legacy to cluster
+ # (and vice versa) with no disruption. Specifically, it enables
+ # the creation of the per-cluster usernames, while still using
+ # the common one for authentication. The "remote" certificate must
+ # be generated with CN=remote if provided manually (same as legacy).
+ # - cluster: Each cluster accesses remote etcd instances with a username
+ # depending on the local cluster name (i.e., remote-<cluster-name>).
+ # The "remote" certificate must be generated with CN=remote-<cluster-name>
+ # if provided manually. Cluster mode is meaningful only when the same
+ # CA is shared across all clusters part of the mesh.
+ authMode: legacy
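+ # Illustrative rollout sketch (an assumption, not an upstream mandate): to move from
+ # legacy to cluster mode without disruption, first set authMode: migration on all
+ # clusters, then switch every cluster to authMode: cluster once they share the same CA.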
+
+ # -- Configure automatic TLS certificate generation.
+ # A Kubernetes CronJob is used to generate any
+ # certificates not provided by the user at installation
+ # time.
+ auto:
+ # -- When set to true, automatically generate a CA and certificates to
+ # enable mTLS between clustermesh-apiserver and external workload instances.
+ # If set to false, the certificates must be provided by setting the appropriate values below.
+ enabled: true
+ # Sets the method to auto-generate certificates. Supported values:
+ # - helm: This method uses Helm to generate all certificates.
+ # - cronJob: This method uses a Kubernetes CronJob to generate any
+ # certificates not provided by the user at installation
+ # time.
+ # - certmanager: This method uses cert-manager to generate & rotate certificates.
+ method: helm
+ # -- Generated certificates validity duration in days.
+ certValidityDuration: 1095
+ # -- Schedule for certificates regeneration (regardless of their expiration date).
+ # Only used if method is "cronJob". If nil, then no recurring job will be created.
+ # Instead, only the one-shot job is deployed to generate the certificates at
+ # installation time.
+ #
+ # Due to the out-of-band distribution of client certs to external workloads the
+ # CA is (re)generated only if it is not provided as a helm value and the k8s
+ # secret is manually deleted.
+ #
+ # Defaults to none. Commented syntax gives midnight of the first day of every
+ # fourth month. For syntax, see
+ # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+ # schedule: "0 0 1 */4 *"
+
+ # [Example]
+ # certManagerIssuerRef:
+ # group: cert-manager.io
+ # kind: ClusterIssuer
+ # name: ca-issuer
+ # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
+ certManagerIssuerRef: {}
+ # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
+ # Used if 'auto' is not enabled.
+ server:
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+ # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
+ # Used if 'auto' is not enabled.
+ admin:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
+ # Used if 'auto' is not enabled.
+ client:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
+ # Used if 'auto' is not enabled.
+ remote:
+ cert: ""
+ key: ""
+
+ # clustermesh-apiserver Prometheus metrics configuration
+ metrics:
+ # -- Enables exporting apiserver metrics in OpenMetrics format.
+ enabled: true
+ # -- Configure the port the apiserver metric server listens on.
+ port: 9962
+
+ kvstoremesh:
+ # -- Enables exporting KVStoreMesh metrics in OpenMetrics format.
+ enabled: true
+ # -- Configure the port the KVStoreMesh metric server listens on.
+ port: 9964
+
+ etcd:
+ # -- Enables exporting etcd metrics in OpenMetrics format.
+ enabled: true
+ # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics.
+ mode: basic
+ # -- Configure the port the etcd metric server listens on.
+ port: 9963
+
+ serviceMonitor:
+ # -- Enable service monitor.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor clustermesh-apiserver
+ labels: {}
+ # -- Annotations to add to ServiceMonitor clustermesh-apiserver
+ annotations: {}
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+
+ # -- Interval for scrape metrics (apiserver metrics)
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
+ metricRelabelings: ~
+
+ kvstoremesh:
+ # -- Interval for scrape metrics (KVStoreMesh metrics)
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
+ metricRelabelings: ~
+
+ etcd:
+ # -- Interval for scrape metrics (etcd metrics)
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
+ metricRelabelings: ~
+
+ # -- Configure external workloads support
+ externalWorkloads:
+ # -- Enable support for external workloads, such as VMs (false by default).
+ enabled: false
+
+ # -- Configure cgroup related configuration
+ cgroup:
+ autoMount:
+ # -- Enable auto mount of cgroup2 filesystem.
+ # When `autoMount` is enabled, cgroup2 filesystem is mounted at
+ # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
+ # If `autoMount` is disabled, it is expected that the cgroup2 filesystem is
+ # already mounted at the specified `cgroup.hostRoot` path on the host; the
+ # volume will then be mounted inside the cilium agent pod at the same path.
+ enabled: true
+ # -- Init Container Cgroup Automount resource limits & requests
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+ hostRoot: /run/cilium/cgroupv2
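+ # Illustrative sketch (hypothetical host setup): if autoMount.enabled is set to false,
+ # the host is expected to already have cgroup2 mounted at hostRoot, e.g. via
+ # `mount -t cgroup2 none /run/cilium/cgroupv2`.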
+
+ # -- Configure whether to enable auto detect of terminating state for endpoints
+ # in order to support graceful termination.
+ enableK8sTerminatingEndpoint: true
+
+ # -- Configure whether to unload DNS policy rules on graceful shutdown
+ # dnsPolicyUnloadOnShutdown: false
+
+ # -- Configure the key of the taint indicating that Cilium is not ready on the node.
+ # When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up.
+ agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
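+ # Illustrative example (verify against your autoscaler configuration): to have Cluster
+ # Autoscaler ignore the taint, a key using the documented prefix could be set, e.g.
+ # agentNotReadyTaintKey: "ignore-taint.cluster-autoscaler.kubernetes.io/cilium-agent-not-ready"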
+
+ dnsProxy:
+ # -- DNS response code for rejecting DNS requests; available options are 'nameError' and 'refused'.
+ dnsRejectResponseCode: refused
+ # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
+ enableDnsCompression: true
+ # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+ endpointMaxIpPerHostname: 50
+ # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+ idleConnectionGracePeriod: 0s
+ # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+ maxDeferredConnectionDeletes: 10000
+ # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
+ # the upstream DNS server returns a DNS record with a shorter TTL, Cilium
+ # overwrites the TTL with this value. Setting this value to zero means that
+ # Cilium will honor the TTLs returned by the upstream DNS server.
+ minTtl: 0
+ # -- DNS cache data at this path is preloaded on agent startup.
+ preCache: ""
+ # -- Global port on which the in-agent DNS proxy should listen. The default of 0 means an OS-assigned port.
+ proxyPort: 0
+ # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+ proxyResponseMaxDelay: 100ms
+ # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
+ # enableTransparentMode: true
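+ # Worked example (a sketch): with minTtl: 3600, an upstream DNS answer carrying a 30s TTL
+ # is kept in toFQDNs policy state for 3600 seconds, whereas minTtl: 0 honors the 30s TTL as-is.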
+
+ # -- SCTP Configuration Values
+ sctp:
+ # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming.
+ enabled: false
+
+ # Configuration for types of authentication for Cilium (beta)
+ authentication:
+ # -- Enable authentication processing and garbage collection.
+ # Note that if disabled, policy enforcement will still block requests that require authentication,
+ # but the resulting authentication requests will not be processed, so those requests will not be allowed.
+ enabled: true
+ # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
+ queueSize: 1024
+ # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
+ rotatedIdentitiesQueueSize: 1024
+ # -- Interval for garbage collection of auth map entries.
+ gcInterval: "5m0s"
+ # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+ # Note that this is not full mTLS support without also enabling encryption of some form.
+ # Current encryption options are Wireguard or IPSec, configured in encryption block above.
+ mutual:
+ # -- Port on the agent where mutual authentication handshakes between agents will be performed
+ port: 4250
+ # -- Timeout for connecting to the remote node TCP socket
+ connectTimeout: 5s
+ # Settings for SPIRE
+ spire:
+ # -- Enable SPIRE integration (beta)
+ enabled: false
+ # -- Annotations to be added to all top-level spire objects (resources under templates/spire)
+ annotations: {}
+ # Settings to control the SPIRE installation and configuration
+ install:
+ # -- Enable SPIRE installation.
+ # This only takes effect if authentication.mutual.spire.enabled is true
+ enabled: true
+ # -- SPIRE namespace to install into
+ namespace: cilium-spire
+ # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
+ existingNamespace: false
+ # -- init container image of SPIRE agent and server
+ initImage:
+ override: ~
+ repository: "docker.io/library/busybox"
+ tag: "1.36.1"
+ digest: "sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # SPIRE agent configuration
+ agent:
+ # -- SPIRE agent image
+ image:
+ override: ~
+ repository: "ghcr.io/spiffe/spire-agent"
+ tag: "1.8.5"
+ digest: "sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE agent service account
+ serviceAccount:
+ create: true
+ name: spire-agent
+ # -- SPIRE agent annotations
+ annotations: {}
+ # -- SPIRE agent labels
+ labels: {}
+ # -- SPIRE Workload Attestor kubelet verification.
+ skipKubeletVerification: true
+ # -- SPIRE agent tolerations configuration
+ # By default it follows the same tolerations as the agent itself
+ # to allow the Cilium agent on this node to connect to SPIRE.
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
+ # -- SPIRE agent affinity configuration
+ affinity: {}
+ # -- SPIRE agent nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- Security context to be added to spire agent pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire agent containers.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ server:
+ # -- SPIRE server image
+ image:
+ override: ~
+ repository: "ghcr.io/spiffe/spire-server"
+ tag: "1.8.5"
+ digest: "sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE server service account
+ serviceAccount:
+ create: true
+ name: spire-server
+ # -- SPIRE server init containers
+ initContainers: []
+ # -- SPIRE server annotations
+ annotations: {}
+ # -- SPIRE server labels
+ labels: {}
+ # SPIRE server service configuration
+ service:
+ # -- Service type for the SPIRE server service
+ type: ClusterIP
+ # -- Annotations to be added to the SPIRE server service
+ annotations: {}
+ # -- Labels to be added to the SPIRE server service
+ labels: {}
+ # -- SPIRE server affinity configuration
+ affinity: {}
+ # -- SPIRE server nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- SPIRE server tolerations configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+ # SPIRE server datastorage configuration
+ dataStorage:
+ # -- Enable SPIRE server data storage
+ enabled: true
+ # -- Size of the SPIRE server data storage
+ size: 1Gi
+ # -- Access mode of the SPIRE server data storage
+ accessMode: ReadWriteOnce
+ # -- StorageClass of the SPIRE server data storage
+ storageClass: null
+ # -- Security context to be added to spire server pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire server containers.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ # SPIRE CA configuration
+ ca:
+ # -- SPIRE CA key type
+ # AWS requires the use of RSA. EC cryptography is not supported
+ keyType: "rsa-4096"
+ # -- SPIRE CA Subject
+ subject:
+ country: "US"
+ organization: "SPIRE"
+ commonName: "Cilium SPIRE CA"
+ # -- SPIRE server address used by Cilium Operator
+ #
+ # If k8s Service DNS along with a port number is used (e.g. <service-name>.<namespace>.svc(.*):<port-number> format),
+ # Cilium Operator will resolve its address by looking up the clusterIP from Service resource.
+ #
+ # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
+ serverAddress: ~
+ # -- SPIFFE trust domain to use for fetching certificates
+ trustDomain: spiffe.cilium
+ # -- SPIRE socket path where the SPIRE delegated api agent is listening
+ adminSocketPath: /run/spire/sockets/admin.sock
+ # -- SPIRE socket path where the SPIRE workload agent is listening.
+ # Applies to both the Cilium Agent and Operator
+ agentSocketPath: /run/spire/sockets/agent/agent.sock
+ # -- SPIRE connection timeout
+ connectionTimeout: 30s
diff --git a/terraform/vmo-cluster/manifests/csi-values.yaml b/terraform/vmo-cluster/manifests/csi-values.yaml
new file mode 100644
index 0000000..9b64075
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/csi-values.yaml
@@ -0,0 +1,1317 @@
+pack:
+ content:
+ images: []
+
+ charts:
+ - repo: https://charts.rook.io/release
+ name: rook-release/rook-ceph
+ version: 1.14.9
+ - repo: https://charts.rook.io/release
+ name: rook-release/rook-ceph-cluster
+ version: 1.14.9
+
+ namespace: rook-ceph
+ namespaceLabels:
+ "rook-ceph": "pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v{{ .spectro.system.kubernetes.version | substr 0 4 }}"
+
+charts:
+ rook-ceph:
+ # Default values for rook-ceph-operator
+ image:
+ # -- Image
+ repository: rook/ceph
+ # -- Image tag
+ # @default -- `master`
+ tag: v1.14.9
+ # -- Image pull policy
+ pullPolicy: IfNotPresent
+
+ crds:
+ # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
+ # managed independently with deploy/examples/crds.yaml.
+ # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
+ # If the CRDs are deleted in this case, see
+ # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
+ # to restore them.
+ enabled: true
+
+ # -- Pod resource requests & limits
+ resources:
+ limits:
+ memory: 512Mi
+ requests:
+ cpu: 200m
+ memory: 128Mi
+
+ # -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
+ nodeSelector: {}
+ # Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
+ # For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # disktype: ssd
+
+ # -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
+ tolerations: []
+
+ # -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
+ # the Kubernetes default of 5 minutes
+ unreachableNodeTolerationSeconds: 5
+
+ # -- Whether the operator should watch cluster CRD in its own namespace or not
+ currentNamespaceOnly: false
+
+ # -- Pod annotations
+ annotations: {}
+
+ # -- Global log level for the operator.
+ # Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
+ logLevel: INFO
+
+ # -- If true, create & use RBAC resources
+ rbacEnable: true
+
+ rbacAggregate:
+ # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
+ enableOBCs: false
+
+ # -- If true, create & use PSP resources
+ pspEnable: false
+
+ # -- Set the priority class for the rook operator deployment if desired
+ priorityClassName:
+
+ # -- Set the container security context for the operator
+ containerSecurityContext:
+ runAsNonRoot: true
+ runAsUser: 2016
+ runAsGroup: 2016
+ capabilities:
+ drop: [ "ALL" ]
+ # -- If true, loop devices are allowed to be used for osds in test clusters
+ allowLoopDevices: false
+
+ # Settings for whether to disable the drivers or other daemons if they are not
+ # needed
+ csi:
+ # -- Enable Ceph CSI RBD driver
+ enableRbdDriver: true
+ # -- Enable Ceph CSI CephFS driver
+ enableCephfsDriver: true
+ # -- Disable the CSI driver.
+ disableCsiDriver: "false"
+
+ # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+ # in some network configurations where the SDN does not provide access to an external cluster or
+ # there is significant drop in read/write performance
+ enableCSIHostNetwork: true
+ # -- Deprecation note: Rook uses "holder" pods to allow CSI to connect to the multus public network
+ # without requiring hosts to be attached to the network. Holder pods are being removed. See issue for details:
+ # https://github.com/rook/rook/issues/13055. New Rook deployments should set this to "true".
+ disableHolderPods: true
+ # -- Enable Snapshotter in CephFS provisioner pod
+ enableCephfsSnapshotter: true
+ # -- Enable Snapshotter in NFS provisioner pod
+ enableNFSSnapshotter: true
+ # -- Enable Snapshotter in RBD provisioner pod
+ enableRBDSnapshotter: true
+ # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
+ enablePluginSelinuxHostMount: false
+ # -- Enable Ceph CSI PVC encryption support
+ enableCSIEncryption: false
+
+ # -- Enable volume group snapshot feature. This feature is
+ # enabled by default as long as the necessary CRDs are available in the cluster.
+ enableVolumeGroupSnapshot: true
+ # -- PriorityClassName to be set on csi driver plugin pods
+ pluginPriorityClassName: system-node-critical
+
+ # -- PriorityClassName to be set on csi driver provisioner pods
+ provisionerPriorityClassName: system-cluster-critical
+
+ # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ rbdFSGroupPolicy: "File"
+
+ # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ cephFSFSGroupPolicy: "File"
+
+ # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ nfsFSGroupPolicy: "File"
+
+ # -- OMAP generator generates the omap mapping between the PV name and the RBD image
+ # which helps CSI to identify the rbd images for CSI operations.
+ # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
+ # By default, the OMAP generator is disabled; when enabled, it is deployed as a
+ # sidecar with the CSI provisioner pod. To enable it, set this to true.
+ enableOMAPGenerator: false
+
+ # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
+ # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
+ cephFSKernelMountOptions:
+
+ # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
+ # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+ # Hence enable metadata is false by default
+ enableMetadata: false
+
+ # -- Set replicas for csi provisioner deployment
+ provisionerReplicas: 2
+
+ # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
+ # in cases where, for example, two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
+ clusterName:
+
+ # -- Set logging level for cephCSI containers maintained by the cephCSI.
+ # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+ logLevel: 0
+
+ # -- Set logging level for Kubernetes-csi sidecar containers.
+ # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+ # @default -- `0`
+ sidecarLogLevel:
+
+ # -- CSI driver name prefix for cephfs, rbd and nfs.
+ # @default -- `namespace name where rook-ceph operator is deployed`
+ csiDriverNamePrefix:
+
+ # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+ # @default -- `RollingUpdate`
+ rbdPluginUpdateStrategy:
+
+ # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
+ # @default -- `1`
+ rbdPluginUpdateStrategyMaxUnavailable:
+
+ # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+ # @default -- `RollingUpdate`
+ cephFSPluginUpdateStrategy:
+
+ # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
+ # @default -- `1`
+ cephFSPluginUpdateStrategyMaxUnavailable:
+
+ # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+ # @default -- `RollingUpdate`
+ nfsPluginUpdateStrategy:
+
+ # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
+ grpcTimeoutInSeconds: 150
+
+ # -- Allow starting an unsupported ceph-csi image
+ allowUnsupportedVersion: false
+
+ # -- Burst to use while communicating with the kubernetes apiserver.
+ kubeApiBurst:
+
+ # -- QPS to use while communicating with the kubernetes apiserver.
+ kubeApiQPS:
+
+ # -- The volume of the CephCSI RBD plugin DaemonSet
+ csiRBDPluginVolume:
+ # - name: lib-modules
+ # hostPath:
+ # path: /run/booted-system/kernel-modules/lib/modules/
+ # - name: host-nix
+ # hostPath:
+ # path: /nix
+
+ # -- The volume mounts of the CephCSI RBD plugin DaemonSet
+ csiRBDPluginVolumeMount:
+ # - name: host-nix
+ # mountPath: /nix
+ # readOnly: true
+
+ # -- The volume of the CephCSI CephFS plugin DaemonSet
+ csiCephFSPluginVolume:
+ # - name: lib-modules
+ # hostPath:
+ # path: /run/booted-system/kernel-modules/lib/modules/
+ # - name: host-nix
+ # hostPath:
+ # path: /nix
+
+ # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
+ csiCephFSPluginVolumeMount:
+ # - name: host-nix
+ # mountPath: /nix
+ # readOnly: true
+
+ # -- CEPH CSI RBD provisioner resource requirement list
+ # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
+ # @default -- see values.yaml
+ csiRBDProvisionerResource: |
+ - name : csi-provisioner
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-resizer
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-attacher
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-snapshotter
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-rbdplugin
+ resource:
+ requests:
+ memory: 512Mi
+ limits:
+ memory: 1Gi
+ - name : csi-omap-generator
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI RBD plugin resource requirement list
+ # @default -- see values.yaml
+ csiRBDPluginResource: |
+ - name : driver-registrar
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+ - name : csi-rbdplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI CephFS provisioner resource requirement list
+ # @default -- see values.yaml
+ csiCephFSProvisionerResource: |
+ - name : csi-provisioner
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-resizer
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-attacher
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-snapshotter
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-cephfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI CephFS plugin resource requirement list
+ # @default -- see values.yaml
+ csiCephFSPluginResource: |
+ - name : driver-registrar
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+ - name : csi-cephfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI NFS provisioner resource requirement list
+ # @default -- see values.yaml
+ csiNFSProvisionerResource: |
+ - name : csi-provisioner
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-nfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : csi-attacher
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+
+ # -- CEPH CSI NFS plugin resource requirement list
+ # @default -- see values.yaml
+ csiNFSPluginResource: |
+ - name : driver-registrar
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+ - name : csi-nfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+
+ # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
+ # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
+
+ # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
+ provisionerTolerations:
+ # - key: key
+ # operator: Exists
+ # effect: NoSchedule
+
+ # -- The node labels for affinity of the CSI provisioner deployment [^1]
+ provisionerNodeAffinity:
+ #key1=value1,value2; key2=value3
+ # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
+
+ # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+
+ # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
+ pluginTolerations:
+ # - key: key
+ # operator: Exists
+ # effect: NoSchedule
+
+ # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
+ pluginNodeAffinity: # key1=value1,value2; key2=value3
+
+ # -- Enable Ceph CSI Liveness sidecar deployment
+ enableLiveness: false
+
+ # -- CSI CephFS driver metrics port
+ # @default -- `9081`
+ cephfsLivenessMetricsPort:
+
+ # -- CSI Addons server port
+ # @default -- `9070`
+ csiAddonsPort:
+
+ # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
+ # you may want to disable this setting. However, this will cause an issue during upgrades
+ # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
+ forceCephFSKernelClient: true
+
+ # -- Ceph CSI RBD driver metrics port
+ # @default -- `8080`
+ rbdLivenessMetricsPort:
+
+ serviceMonitor:
+ # -- Enable ServiceMonitor for Ceph CSI drivers
+ enabled: false
+ # -- Service monitor scrape interval
+ interval: 10s
+ # -- ServiceMonitor additional labels
+ labels: {}
+ # -- Use a different namespace for the ServiceMonitor
+ namespace:
+
+ # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
+ # @default -- `/var/lib/kubelet`
+ kubeletDirPath:
+
+ # -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
+ # @default -- `137s`
+ csiLeaderElectionLeaseDuration:
+
+ # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
+ # @default -- `107s`
+ csiLeaderElectionRenewDeadline:
+
+ # -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
+ # @default -- `26s`
+ csiLeaderElectionRetryPeriod:
+
+ cephcsi:
+ # -- Ceph CSI image repository
+ repository: quay.io/cephcsi/cephcsi
+ # -- Ceph CSI image tag
+ tag: v3.11.0
+
+ registrar:
+ # -- Kubernetes CSI registrar image repository
+ repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+ # -- Registrar image tag
+ tag: v2.10.1
+
+ provisioner:
+ # -- Kubernetes CSI provisioner image repository
+ repository: registry.k8s.io/sig-storage/csi-provisioner
+ # -- Provisioner image tag
+ tag: v4.0.1
+
+ snapshotter:
+ # -- Kubernetes CSI snapshotter image repository
+ repository: registry.k8s.io/sig-storage/csi-snapshotter
+ # -- Snapshotter image tag
+ tag: v7.0.2
+
+ attacher:
+ # -- Kubernetes CSI Attacher image repository
+ repository: registry.k8s.io/sig-storage/csi-attacher
+ # -- Attacher image tag
+ tag: v4.5.1
+
+ resizer:
+ # -- Kubernetes CSI resizer image repository
+ repository: registry.k8s.io/sig-storage/csi-resizer
+ # -- Resizer image tag
+ tag: v1.10.1
+
+ # -- Image pull policy
+ imagePullPolicy: IfNotPresent
+
+ # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
+ cephfsPodLabels: #"key1=value1,key2=value2"
+
+ # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
+ nfsPodLabels: #"key1=value1,key2=value2"
+
+ # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
+ rbdPodLabels: #"key1=value1,key2=value2"
+
+ csiAddons:
+ # -- Enable CSIAddons
+ enabled: false
+ # -- CSIAddons sidecar image repository
+ repository: quay.io/csiaddons/k8s-sidecar
+ # -- CSIAddons sidecar image tag
+ tag: v0.8.0
+
+ nfs:
+ # -- Enable the nfs csi driver
+ enabled: false
+
+ topology:
+ # -- Enable topology based provisioning
+ enabled: false
+ # NOTE: the value here serves as an example and needs to be
+ # updated with node labels that define domains of interest
+ # -- domainLabels define which node labels to use as domains
+ # for CSI nodeplugins to advertise their domains
+ domainLabels:
+ # - kubernetes.io/hostname
+ # - topology.kubernetes.io/zone
+ # - topology.rook.io/rack
+
+ # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
+ # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+ # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
+ # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ cephFSAttachRequired: true
+ # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
+ # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
+ # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
+ # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
+ # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+ # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ rbdAttachRequired: true
+ # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
+ # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+ # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
+ # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ nfsAttachRequired: true
+
+ # -- Enable discovery daemon
+ enableDiscoveryDaemon: false
+ # -- Set the discovery daemon device discovery interval (default to 60m)
+ discoveryDaemonInterval: 60m
+
+ # -- The timeout for ceph commands in seconds
+ cephCommandsTimeoutSeconds: "15"
+
+ # -- If true, run rook operator on the host network
+ useOperatorHostNetwork:
+
+ # -- If true, scale down the rook operator.
+ # This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
+ # to deploy your helm charts.
+ scaleDownOperator: false
+
+ ## Rook Discover configuration
+ ## toleration: NoSchedule, PreferNoSchedule or NoExecute
+ ## tolerationKey: Set this to the specific key of the taint to tolerate
+ ## tolerations: Array of tolerations in YAML format which will be added to agent deployment
+ ## nodeAffinity: Set to labels of the node to match
+
+ discover:
+ # -- Toleration for the discover pods.
+ # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
+ toleration:
+ # -- The specific key of the taint to tolerate
+ tolerationKey:
+ # -- Array of tolerations in YAML format which will be added to discover deployment
+ tolerations:
+ # - key: key
+ # operator: Exists
+ # effect: NoSchedule
+ # -- The node labels for affinity of `discover-agent` [^1]
+ nodeAffinity: # key1=value1,value2; key2=value3
+
+ #
+ # or
+ #
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: storage-node
+ # operator: Exists
+ # -- Labels to add to the discover pods
+ podLabels:
+ # "key1=value1,key2=value2"
+
+ # -- Add resources to discover daemon pods
+ resources:
+ # - limits:
+ # memory: 512Mi
+ # - requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
+ hostpathRequiresPrivileged: false
+
+ # -- Disable automatic orchestration when new devices are discovered.
+ disableDeviceHotplug: false
+
+ # -- Blacklist certain disks according to the regex provided.
+ discoverDaemonUdev:
+
+ # -- imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
+ imagePullSecrets: # - name: my-registry-secret
+
+ # -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used
+ enableOBCWatchOperatorNamespace: true
+
+ # -- Specify the prefix for the OBC provisioner in place of the cluster namespace
+ # @default -- `ceph cluster namespace`
+ obcProvisionerNamePrefix:
+
+ monitoring:
+ # -- Enable monitoring. Requires Prometheus to be pre-installed.
+ # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+ enabled: false
+
+ rook-ceph-cluster:
+ # Default values for a single rook-ceph cluster
+ # This is a YAML-formatted file.
+ # Declare variables to be passed into your templates.
+
+ # -- Namespace of the main rook operator
+ operatorNamespace: rook-ceph
+
+ # -- The metadata.name of the CephCluster CR
+ # @default -- The same as the namespace
+ clusterName:
+
+ # -- Optional override of the target kubernetes version
+ kubeVersion:
+ # mon_allow_pool_delete = true
+ # osd_pool_default_size = 3
+ # osd_pool_default_min_size = 2
+
+ # Installs a debugging toolbox deployment
+ toolbox:
+ # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
+ enabled: true
+ # -- Toolbox image, defaults to the image used by the Ceph cluster
+ image:
+ #quay.io/ceph/ceph:v18.2.4
+ # -- Toolbox tolerations
+
+ tolerations: []
+ # -- Toolbox affinity
+ affinity: {}
+ # -- Toolbox container security context
+ containerSecurityContext:
+ runAsNonRoot: true
+ runAsUser: 2016
+ runAsGroup: 2016
+ capabilities:
+ drop: [ "ALL" ]
+ # -- Toolbox resources
+ resources:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "100m"
+ memory: "128Mi"
+ # -- Set the priority class for the toolbox if desired
+ priorityClassName:
+
+
+ monitoring:
+ # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
+ # Monitoring requires Prometheus to be pre-installed
+ enabled: false
+ # -- Whether to create the Prometheus rules for Ceph alerts
+ createPrometheusRules: false
+ # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
+ # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
+ # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+ rulesNamespaceOverride:
+ # Monitoring settings for external clusters:
+ # externalMgrEndpoints:
+ # externalMgrPrometheusPort:
+ # Scrape interval for prometheus
+ # interval: 10s
+ # allow adding custom labels and annotations to the prometheus rule
+ prometheusRule:
+ # -- Labels applied to PrometheusRule
+ labels: {}
+ # -- Annotations applied to PrometheusRule
+ annotations: {}
+
+ # -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
+ pspEnable: false
+
+ # imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
+ # imagePullSecrets:
+ # - name: my-registry-secret
+
+ # All values below are taken from the CephCluster CRD
+ # -- Cluster configuration.
+ # @default -- See [below](#ceph-cluster-spec)
+ cephClusterSpec:
+ # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
+ # as in the host-based example (cluster.yaml). For a different configuration such as a
+ # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
+ # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
+ # with the specs from those examples.
+
+ # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
+ cephVersion:
+ # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+ # v17 is Quincy, v18 is Reef.
+ # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
+ # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+ # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724
+ # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+ image: quay.io/ceph/ceph:v18.2.4
+ # Whether to allow unsupported versions of Ceph. Currently `quincy` and `reef` are supported.
+ # Future versions such as `squid` (v19) would require this to be set to `true`.
+ # Do not set to true in production.
+ allowUnsupported: false
+
+ # The path on the host where configuration files will be persisted. Must be specified.
+ # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+ # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+ dataDirHostPath: /var/lib/rook
+
+ # Whether or not upgrade should continue even if a check fails
+ # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
+ # Use at your OWN risk
+ # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
+ skipUpgradeChecks: false
+
+ # Whether or not continue if PGs are not clean during an upgrade
+ continueUpgradeAfterChecksEvenIfNotHealthy: false
+
+ # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator waits before an OSD can be stopped for upgrade or restart.
+ # If the timeout is exceeded and the OSD is not ok to stop, the operator skips the upgrade for the current OSD and proceeds with the next one
+ # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator
+ # continues with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
+ # The default wait timeout is 10 minutes.
+ waitTimeoutForHealthyOSDInMinutes: 10
+
+ # Whether or not PGs must be clean before an OSD upgrade. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
+ # This configuration will be ignored if `skipUpgradeChecks` is `true`.
+ # Default is false.
+ upgradeOSDRequiresHealthyPGs: false
+
+ mon:
+ # Set the number of mons to be started. Generally recommended to be 3.
+ # For highest availability, an odd number of mons should be specified.
+ count: 1
+ # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
+ # Mons should only be allowed on the same node for test environments where data loss is acceptable.
+ allowMultiplePerNode: true
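+ # Illustrative production-style sketch (not this demo's defaults), following the
+ # recommendations above:
+ # count: 3
+ # allowMultiplePerNode: false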
+
+ mgr:
+ # When higher availability of the mgr is needed, increase the count to 2.
+ # In that case, one mgr will be active and one in standby. When Ceph updates which
+ # mgr is active, Rook will update the mgr services to match the active mgr.
+ count: 1
+ allowMultiplePerNode: true
+ modules:
+ # List of modules to optionally enable or disable.
+ # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
+ # - name: rook
+ # enabled: true
+
+ # enable the ceph dashboard for viewing cluster status
+ dashboard:
+ enabled: true
+ # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+ urlPrefix: /ceph-dashboard
+ # serve the dashboard at the given port.
+ # port: 8443
+ # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
+ # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
+ ssl: true
+
+ # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
+ network:
+ connections:
+ # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
+ # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+ # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
+ # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
+ # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
+ # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
+ encryption:
+ enabled: false
+ # Whether to compress the data in transit across the wire. The default is false.
+ # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+ compression:
+ enabled: false
+ # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
+ # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
+ # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
+ requireMsgr2: false
+ # # enable host networking
+ # provider: host
+ # # EXPERIMENTAL: enable the Multus network provider
+ # provider: multus
+ # selectors:
+ # # The selector keys are required to be `public` and `cluster`.
+ # # Based on the configuration, the operator will do the following:
+ # # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
+ # # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
+ # #
+ # # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
+ # #
+ # # public: public-conf --> NetworkAttachmentDefinition object name in Multus
+ # # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
+ # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
+ # ipFamily: "IPv6"
+ # # Ceph daemons to listen on both IPv4 and Ipv6 networks
+ # dualStack: false
+
+ # enable the crash collector for ceph daemon crash collection
+ crashCollector:
+ disable: true
+ # Uncomment daysToRetain to prune ceph crash entries older than the
+ # specified number of days.
+ # daysToRetain: 30
+
+ # enable log collector, daemons will log on files and rotate
+ logCollector:
+ enabled: true
+ periodicity: daily # one of: hourly, daily, weekly, monthly
+ maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
+
+ # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+ cleanupPolicy:
+ # Since cluster cleanup is destructive to data, confirmation is required.
+ # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
+ # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
+ # Rook will immediately stop configuring the cluster and only wait for the delete command.
+ # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
+ confirmation: ""
+ # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
+ sanitizeDisks:
+ # method indicates if the entire disk should be sanitized or simply ceph's metadata
+ # in both cases, re-install is possible
+ # possible choices are 'complete' or 'quick' (default)
+ method: quick
+ # dataSource indicates where to get random bytes from to write on the disk
+ # possible choices are 'zero' (default) or 'random'
+ # using random sources will consume entropy from the system and will take much more time than the zero source
+ dataSource: zero
+ # iteration overwrites N times instead of the default (1)
+ # takes an integer value
+ iteration: 1
+ # allowUninstallWithVolumes defines how the uninstall should be performed
+ # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
+ allowUninstallWithVolumes: false
+
+ # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+ # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+ # tolerate taints with a key of 'storage-node'.
+ # placement:
+ # all:
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: role
+ # operator: In
+ # values:
+ # - storage-node
+ # podAffinity:
+ # podAntiAffinity:
+ # topologySpreadConstraints:
+ # tolerations:
+ # - key: storage-node
+ # operator: Exists
+ # # The above placement information can also be specified for mon, osd, and mgr components
+ # mon:
+ # # Monitor deployments may contain an anti-affinity rule for avoiding monitor
+ # # collocation on the same node. This is a required rule when host network is used
+ # # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+ # # preferred rule with weight: 50.
+ # osd:
+ # mgr:
+ # cleanup:
+
+ # annotations:
+ # all:
+ # mon:
+ # osd:
+ # cleanup:
+ # prepareosd:
+ # # If no mgr annotations are set, prometheus scrape annotations will be set by default.
+ # mgr:
+ # dashboard:
+
+ # labels:
+ # all:
+ # mon:
+ # osd:
+ # cleanup:
+ # mgr:
+ # prepareosd:
+ # # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
+ # # These labels can be passed as LabelSelector to Prometheus
+ # monitoring:
+ # dashboard:
+
+ resources:
+ mgr:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ mon:
+ limits:
+ memory: "2Gi"
+ requests:
+ cpu: "1000m"
+ memory: "1Gi"
+ osd:
+ limits:
+ memory: "4Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+ prepareosd:
+ # limits: It is not recommended to set limits on the OSD prepare job
+ # since it's a one-time burst for memory that must be allowed to
+ # complete without an OOM kill. Note however that if a k8s
+ # limitRange guardrail is defined external to Rook, the lack of
+ # a limit here may result in a sync failure, in which case a
+ # limit should be added. 1200Mi may suffice for up to 15Ti
+ # OSDs; for larger devices 2Gi may be required.
+ # cf. https://github.com/rook/rook/pull/11103
+ requests:
+ cpu: "500m"
+ memory: "50Mi"
+ mgr-sidecar:
+ limits:
+ memory: "100Mi"
+ requests:
+ cpu: "100m"
+ memory: "40Mi"
+ crashcollector:
+ limits:
+ memory: "60Mi"
+ requests:
+ cpu: "100m"
+ memory: "60Mi"
+ logcollector:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "100m"
+ memory: "100Mi"
+ cleanup:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "500m"
+ memory: "100Mi"
+ exporter:
+ limits:
+ memory: "128Mi"
+ requests:
+ cpu: "50m"
+ memory: "50Mi"
+
+ # The option to automatically remove OSDs that are out and are safe to destroy.
+ removeOSDsIfOutAndSafeToRemove: true
+
+ # priority classes to apply to ceph resources
+ priorityClassNames:
+ mon: system-node-critical
+ osd: system-node-critical
+ mgr: system-cluster-critical
+
+ storage:
+ # cluster level storage configuration and selection
+ useAllNodes: true
+ useAllDevices: true
+ # deviceFilter:
+ # config:
+ # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
+ # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+ # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
+ # osdsPerDevice: "1" # this value can be overridden at the node or device level
+ # encryptedDevice: "true" # the default value for this option is "false"
+ # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+ # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+ # nodes:
+ # - name: "172.17.4.201"
+ # devices: # specific devices to use for storage can be specified for each node
+ # - name: "sdb"
+ # - name: "nvme01" # multiple osds can be created on high performance devices
+ # config:
+ # osdsPerDevice: "5"
+ # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
+ # config: # configuration can be specified at the node level which overrides the cluster level config
+ # - name: "172.17.4.301"
+ # deviceFilter: "^sd."
+
+ # The section for configuring management of daemon disruptions during upgrade or fencing.
+ disruptionManagement:
+ # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+ # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
+ # block eviction of OSDs by default and unblock them safely when drains are detected.
+ managePodBudgets: true
+ # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+ # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
+ osdMaintenanceTimeout: 30
+ # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
+ # The operator will continue with the next drain if the timeout is exceeded. It only applies when `managePodBudgets` is `true`.
+ # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
+ pgHealthCheckTimeout: 0
+
+ # Configure the healthcheck and liveness probes for ceph pods.
+ # Valid values for daemons are 'mon', 'osd', 'status'
+ healthCheck:
+ daemonHealth:
+ mon:
+ disabled: false
+ interval: 45s
+ osd:
+ disabled: false
+ interval: 60s
+ status:
+ disabled: false
+ interval: 60s
+ # Change the pod liveness probe; it applies to all mon, mgr, and osd pods.
+ livenessProbe:
+ mon:
+ disabled: false
+ mgr:
+ disabled: false
+ osd:
+ disabled: false
+
+ ingress:
+ # -- Enable an ingress for the ceph-dashboard
+ dashboard:
+ annotations:
+ cert-manager.io/issuer: selfsigned-issuer
+ nginx.ingress.kubernetes.io/backend-protocol: HTTPS
+ nginx.ingress.kubernetes.io/server-snippet: |
+ proxy_ssl_verify off;
+ host:
+ name: ceph-vmo-lab.maas-eng.sc
+ path: "/ceph-dashboard/"
+ tls:
+ - hosts:
+ - ceph-vmo-lab.maas-eng.sc
+ secretName: ceph-dashboard-tls
+ ingressClassName: nginx
+
+ # -- A list of CephBlockPool configurations to deploy
+ # @default -- See [below](#ceph-block-pools)
+ cephBlockPools:
+ - name: ceph-blockpool
+ # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+ spec:
+ failureDomain: host
+ replicated:
+ size: ${worker_nodes}
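+ # ${worker_nodes} is a Terraform template placeholder; it is assumed to be rendered
+ # (for example via templatefile) with the cluster's worker node count so that the
+ # replica size matches the number of OSD-capable nodes.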
+ # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+ # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
+ # enableRBDStats: true
+ storageClass:
+ enabled: true
+ name: ceph-block
+ annotations:
+ storageclass.kubevirt.io/is-default-virt-class: "true"
+ labels: {}
+ isDefault: false
+ reclaimPolicy: Delete
+ allowVolumeExpansion: true
+ volumeBindingMode: "Immediate"
+ mountOptions: []
+ # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+ allowedTopologies: []
+ # - matchLabelExpressions:
+ # - key: rook-ceph-role
+ # values:
+ # - storage-node
+ # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
+ parameters:
+ # (optional) mapOptions is a comma-separated list of map options.
+ # For krbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # For nbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # mapOptions: lock_on_read,queue_depth=1024
+
+ # (optional) unmapOptions is a comma-separated list of unmap options.
+ # For krbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # For nbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # unmapOptions: force
+
+ # RBD image format. Defaults to "2".
+ imageFormat: "2"
+
+ # RBD image features, equivalent to OR'd bitfield value: 63
+ # Available for imageFormat: "2". Older releases of CSI RBD
+ # support only the `layering` feature. The Linux kernel (KRBD) supports the
+ # full feature complement as of 5.4
+ imageFeatures: layering
+
+ # These secrets contain Ceph admin credentials.
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+ csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+ # Specify the filesystem type of the volume. If not specified, csi-provisioner
+ # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+ # in hyperconverged settings where the volume is mounted on the same node as the osds.
+ csi.storage.k8s.io/fstype: ext4
+
+ # -- A list of CephFileSystem configurations to deploy
+ # @default -- See [below](#ceph-file-systems)
+ cephFileSystems:
+ - name: ceph-filesystem
+ # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
+ spec:
+ metadataPool:
+ replicated:
+ size: ${worker_nodes}
+ dataPools:
+ - failureDomain: host
+ replicated:
+ size: ${worker_nodes}
+ # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
+ name: data0
+ metadataServer:
+ activeCount: 1
+ activeStandby: true
+ resources:
+ limits:
+ memory: "4Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+ priorityClassName: system-cluster-critical
+ storageClass:
+ enabled: true
+ isDefault: true
+ annotations: {}
+ labels: {}
+ name: ceph-filesystem
+ # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
+ pool: data0
+ reclaimPolicy: Delete
+ allowVolumeExpansion: true
+ volumeBindingMode: "Immediate"
+ mountOptions: []
+ # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
+ parameters:
+ # The secrets contain Ceph admin credentials.
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+ csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+ csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+ csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+ # Specify the filesystem type of the volume. If not specified, csi-provisioner
+ # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+ # in hyperconverged settings where the volume is mounted on the same node as the osds.
+ csi.storage.k8s.io/fstype: ext4
+
+ # -- Settings for the filesystem snapshot class
+ # @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
+ cephFileSystemVolumeSnapshotClass:
+ enabled: false
+ name: ceph-filesystem
+ isDefault: true
+ deletionPolicy: Delete
+ annotations: {}
+ labels: {}
+ # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
+ parameters: {}
+
+ # -- Settings for the block pool snapshot class
+ # @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
+ cephBlockPoolsVolumeSnapshotClass:
+ enabled: false
+ name: ceph-block
+ isDefault: false
+ deletionPolicy: Delete
+ annotations: {}
+ labels: {}
+ # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
+ parameters: {}
+
+ # -- A list of CephObjectStore configurations to deploy
+ # @default -- See [below](#ceph-object-stores)
+ cephObjectStores: []
+ ## cephECBlockPools are disabled by default; remove the comments and set the desired values to enable them.
+ ## For erasure coding, a replicated metadata pool is required.
+ ## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+ #cephECBlockPools:
+ # - name: ec-pool
+ # spec:
+ # metadataPool:
+ # replicated:
+ # size: 2
+ # dataPool:
+ # failureDomain: osd
+ # erasureCoded:
+ # dataChunks: 2
+ # codingChunks: 1
+ # deviceClass: hdd
+ #
+ # parameters:
+ # # clusterID is the namespace where the rook cluster is running
+ # # If you change this namespace, also change the namespace below where the secret namespaces are defined
+ # clusterID: rook-ceph # namespace:cluster
+ # # (optional) mapOptions is a comma-separated list of map options.
+ # # For krbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # # For nbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # # mapOptions: lock_on_read,queue_depth=1024
+ #
+ # # (optional) unmapOptions is a comma-separated list of unmap options.
+ # # For krbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # # For nbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # # unmapOptions: force
+ #
+ # # RBD image format. Defaults to "2".
+ # imageFormat: "2"
+ #
+ # # RBD image features, equivalent to OR'd bitfield value: 63
+ # # Available for imageFormat: "2". Older releases of CSI RBD
+ # # support only the `layering` feature. The Linux kernel (KRBD) supports the
+ # # full feature complement as of 5.4
+ # # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+ # imageFeatures: layering
+ #
+ # storageClass:
+ # provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
+ # enabled: true
+ # name: rook-ceph-block
+ # isDefault: false
+ # annotations: { }
+ # labels: { }
+ # allowVolumeExpansion: true
+ # reclaimPolicy: Delete
+
+ # -- CSI driver name prefix for cephfs, rbd and nfs.
+ # @default -- `namespace name where rook-ceph operator is deployed`
+ csiDriverNamePrefix:
+ configOverride: |
+ [global]
+ osd_pool_default_size = 1
+ mon_warn_on_pool_no_redundancy = false
+ bdev_flock_retry = 20
+ bluefs_buffered_io = false
+ mon_data_avail_warn = 10
diff --git a/terraform/vmo-cluster/manifests/k8s-values.yaml b/terraform/vmo-cluster/manifests/k8s-values.yaml
new file mode 100644
index 0000000..b458475
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/k8s-values.yaml
@@ -0,0 +1,118 @@
+pack:
+ k8sHardening: True
+ content:
+ images:
+ - image: registry.k8s.io/coredns/coredns:v1.11.3
+ - image: registry.k8s.io/etcd:3.5.15-0
+ - image: registry.k8s.io/kube-apiserver:v1.30.6
+ - image: registry.k8s.io/kube-controller-manager:v1.30.6
+ - image: registry.k8s.io/kube-proxy:v1.30.6
+ - image: registry.k8s.io/kube-scheduler:v1.30.6
+ - image: registry.k8s.io/pause:3.9
+ - image: registry.k8s.io/pause:3.8
+ # CIDR range for Pods in the cluster
+ # Note: This must not overlap with any of the host or service networks
+ podCIDR: "100.64.0.0/18"
+ # CIDR-notation IP range from which to assign service cluster IPs
+ # Note: This must not overlap with any IP ranges assigned to nodes for pods.
+ serviceClusterIpRange: "100.64.64.0/18"
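+ # For example, with the node network 10.11.136.0/24 used elsewhere in this tutorial
+ # (see ubuntu-values.yaml), the pod CIDR 100.64.0.0/18 and the service CIDR
+ # 100.64.64.0/18 do not overlap it or each other.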
+ palette:
+ config:
+ dashboard:
+ identityProvider: palette
+kubeadmconfig:
+ apiServer:
+ extraArgs:
+ # Note : secure-port flag is used during kubeadm init. Do not change this flag on a running cluster
+ secure-port: "6443"
+ anonymous-auth: "true"
+ profiling: "false"
+ disable-admission-plugins: "AlwaysAdmit"
+ default-not-ready-toleration-seconds: "60"
+ default-unreachable-toleration-seconds: "60"
+ enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity"
+ admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml"
+ audit-log-path: /var/log/apiserver/audit.log
+ audit-policy-file: /etc/kubernetes/audit-policy.yaml
+ audit-log-maxage: "31"
+ audit-log-maxbackup: "10"
+ audit-log-maxsize: "100"
+ authorization-mode: RBAC,Node
+ kubelet-certificate-authority: "/etc/kubernetes/pki/ca.crt"
+ tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+ extraVolumes:
+ - name: audit-log
+ hostPath: /var/log/apiserver
+ mountPath: /var/log/apiserver
+ pathType: DirectoryOrCreate
+ - name: audit-policy
+ hostPath: /etc/kubernetes/audit-policy.yaml
+ mountPath: /etc/kubernetes/audit-policy.yaml
+ readOnly: true
+ pathType: File
+ - name: pod-security-standard
+ hostPath: /etc/kubernetes/pod-security-standard.yaml
+ mountPath: /etc/kubernetes/pod-security-standard.yaml
+ readOnly: true
+ pathType: File
+ controllerManager:
+ extraArgs:
+ profiling: "false"
+ terminated-pod-gc-threshold: "25"
+ use-service-account-credentials: "true"
+ feature-gates: "RotateKubeletServerCertificate=true"
+ scheduler:
+ extraArgs:
+ profiling: "false"
+ kubeletExtraArgs:
+ read-only-port: "0"
+ event-qps: "0"
+ feature-gates: "RotateKubeletServerCertificate=true"
+ protect-kernel-defaults: "true"
+ rotate-server-certificates: "true"
+ tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+ files:
+ - path: hardening/audit-policy.yaml
+ targetPath: /etc/kubernetes/audit-policy.yaml
+ targetOwner: "root:root"
+ targetPermissions: "0600"
+ - path: hardening/90-kubelet.conf
+ targetPath: /etc/sysctl.d/90-kubelet.conf
+ targetOwner: "root:root"
+ targetPermissions: "0600"
+ - targetPath: /etc/kubernetes/pod-security-standard.yaml
+ targetOwner: "root:root"
+ targetPermissions: "0600"
+ content: |
+ apiVersion: apiserver.config.k8s.io/v1
+ kind: AdmissionConfiguration
+ plugins:
+ - name: PodSecurity
+ configuration:
+ apiVersion: pod-security.admission.config.k8s.io/v1
+ kind: PodSecurityConfiguration
+ defaults:
+ enforce: "baseline"
+ enforce-version: "v1.30"
+ audit: "baseline"
+ audit-version: "v1.30"
+ warn: "restricted"
+ warn-version: "v1.30"
+ exemptions:
+ # Array of authenticated usernames to exempt.
+ usernames: []
+ # Array of runtime class names to exempt.
+ runtimeClasses: []
+ # Array of namespaces to exempt.
+ namespaces: [kube-system]
+
+ preKubeadmCommands:
+ # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required
+ - 'echo "====> Applying kernel parameters for Kubelet"'
+ - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
+
+ postKubeadmCommands:
+ - 'chmod 600 /var/lib/kubelet/config.yaml'
+ # - 'echo "List of post kubeadm commands to be executed"'
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
new file mode 100644
index 0000000..607dfc9
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -0,0 +1,47 @@
+kubeadmconfig:
+ preKubeadmCommands:
+ - 'echo "====> Applying pre Kubeadm commands"'
+ # Force a specific IP address as the Node InternalIP for the kubelet
+ - apt update
+ - apt install -y grepcidr
+ - |
+ NETWORKS="10.11.136.0/24"
+ IPS=$(hostname -I)
+ for IP in $IPS
+ do
+ echo "$IP" | grepcidr "$NETWORKS" >/dev/null && echo " --node-ip=$IP" >> /etc/default/kubelet
+ if [ $? == 0 ]; then break; fi
+ done
+ # Increase audit_backlog_limit
+ - sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="audit_backlog_limit=256"/g' /etc/default/grub
+ - update-grub
+ # Clean up stale container images
+ - (crontab -l || true; echo "0 4 * * * /usr/bin/crictl -c /etc/crictl.yaml rmi --prune")| crontab -
+ # Update CA certs
+ - update-ca-certificates
+ # Start containerd with new configuration
+ - systemctl daemon-reload
+ - systemctl restart containerd
+ postKubeadmCommands:
+ - 'echo "====> Applying post Kubeadm commands"'
+ files:
+ - targetPath: /etc/containerd/config.toml
+ targetOwner: "root:root"
+ targetPermissions: "0644"
+ content: |
+ ## template: jinja
+
+ # Use config version 2 to enable new configuration fields.
+ # Config file is parsed as version 1 by default.
+ version = 2
+
+ imports = ["/etc/containerd/conf.d/*.toml"]
+
+ [plugins]
+ [plugins."io.containerd.grpc.v1.cri"]
+ sandbox_image = "registry.k8s.io/pause:3.9"
+ device_ownership_from_security_context = true
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+ runtime_type = "io.containerd.runc.v2"
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+ SystemdCgroup = true
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
new file mode 100644
index 0000000..7cc94dc
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -0,0 +1,136 @@
+apiVersion: spectrocloud.com/v1
+kind: VmTemplate
+metadata:
+ name: ubuntu-2204
+spec:
+ description: Ubuntu 22.04
+ displayName: Ubuntu 22.04
+ icon: https://s3.amazonaws.com/manifests.spectrocloud.com/logos/ubuntu.png
+ running: false
+ dataVolumeTemplates:
+ - metadata:
+ name: ubuntu-2204
+ spec:
+ source:
+ pvc:
+ name: template-ubuntu-2204
+ namespace: vmo-golden-images
+ #storage: (errors in VMO GUI)
+ pvc:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ volumeMode: Block
+ storageClassName: ceph-block
+ template:
+ metadata:
+ annotations:
+ descheduler.alpha.kubernetes.io/evict: "true"
+ labels:
+ kubevirt.io/size: small
+ kubevirt.io/domain: hellouni
+ spec:
+ domain:
+ cpu:
+ cores: 2
+ sockets: 1
+ threads: 1
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: datavolume-os
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ interfaces:
+ - masquerade: {}
+ name: default
+ model: virtio
+ #macAddress: '00:5e:ab:cd:ef:01'
+ machine:
+ type: q35
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ memory: 2Gi
+ networks:
+ - name: default
+ pod: {}
+ volumes:
+ - dataVolume:
+ name: ubuntu-2204
+ name: datavolume-os
+ - cloudInitNoCloud:
+ userData: |
+ #cloud-config
+ ssh_pwauth: True
+ chpasswd: { expire: False }
+ password: spectro
+ disable_root: false
+ runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
+ name: cloudinitdisk
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+ name: "template-ubuntu-2204"
+ namespace: "vmo-golden-images"
+ annotations:
+ cdi.kubevirt.io/storage.deleteAfterCompletion: "false"
+ cdi.kubevirt.io/storage.bind.immediate.requested: ""
+spec:
+ storage:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ volumeMode: Block
+ storageClassName: ceph-block
+ source:
+ registry:
+ url: "docker://gcr.io/spectro-images-public/release/vm-dashboard/os/ubuntu-container-disk:22.04"
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: StorageProfile
+metadata:
+ name: ceph-filesystem
+spec:
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode:
+ Filesystem
+ cloneStrategy: csi-clone
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: StorageProfile
+metadata:
+ name: ceph-block
+spec:
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode:
+ Block
+ cloneStrategy: csi-clone
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-values.yaml b/terraform/vmo-cluster/manifests/vmo-extras-values.yaml
new file mode 100644
index 0000000..03a8f0f
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-extras-values.yaml
@@ -0,0 +1,2 @@
+pack:
+ spectrocloud.com/install-priority: "30"
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
new file mode 100644
index 0000000..35b1244
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -0,0 +1,600 @@
+pack:
+ content:
+ images:
+ - image: gcr.io/spectro-images-public/release/spectro-vm-dashboard:4.4.10
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-operator:v1.2.0
+ - image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v6.3.4
+ - image: registry.k8s.io/sig-storage/snapshot-controller:v6.3.4
+ - image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.0.2-thick
+ - image: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller:latest-amd64
+ - image: quay.io/kubevirt/cdi-operator:v1.58.0
+ - image: quay.io/kubevirt/cdi-uploadproxy:v1.58.0
+ - image: quay.io/kubevirt/cdi-controller:v1.58.0
+ - image: quay.io/kubevirt/cdi-apiserver:v1.58.0
+ - image: quay.io/kubevirt/cdi-importer:v1.58.0
+ - image: quay.io/kubevirt/cdi-uploadserver:v1.58.0
+ - image: quay.io/kubevirt/cdi-cloner:v1.58.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-handler:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-launcher:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-exportproxy:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-exportserver:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-controller:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-api:v1.2.0
+ - image: registry.k8s.io/descheduler/descheduler:v0.30.1
+ - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/os/ubuntu-container-disk:22.04
+ - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/os/fedora-container-disk:37
+ - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/vlan-filtering/ubuntu:latest
+ - image: gcr.io/spectro-images-public/release/spectro-cleanup:1.0.2
+ - image: gcr.io/spectro-images-public/release/spectro-kubectl:1.30.2-spectro-4.4.a
+ namespace: vm-dashboard
+ palette:
+ config:
+ dashboard:
+ access: private
+ spectrocloud.com/install-priority: "20"
+charts:
+ virtual-machine-orchestrator:
+ image:
+ repository: gcr.io/spectro-images-public/release/spectro-vm-dashboard
+ tag: "4.4.10"
+ service:
+ type: "ClusterIP"
+ appConfig:
+ clusterInfo:
+ consoleBaseAddress: ""
+ fullnameOverride: "virtual-machine-orchestrator"
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: "virtual-machine-orchestrator"
+ sampleTemplates:
+ fedora37: false
+ ubuntu2204: false
+ ubuntu2204WithVol: false
+ ubuntu2204staticIP: false
+ fedora37staticIP: false
+ # To create additional VM templates, refer to https://docs.spectrocloud.com/vm-management/create-manage-vm/create-vm-template
+ # This namespace will be used to store golden images
+ goldenImagesNamespace: "vmo-golden-images"
+ # These namespaces will be created and set up to deploy VMs into
+ vmEnabledNamespaces:
+ - "default"
+ - "virtual-machines"
+ - ns-adv
+ - ns-edge
+ - ns-product
+ - ns-packs
+ grafana:
+ namespace: monitoring
+ vlanFiltering:
+ enabled: true
+ namespace: kube-system
+ image:
+ repository: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/vlan-filtering/ubuntu
+ pullPolicy: IfNotPresent
+ tag: "latest"
+ env:
+ # Which bridge interface to control
+ bridgeIF: "br0"
+ # Beginning of VLAN range to enable
+ allowedVlans: "128,129"
+ # Set to "true" to enable VLANs on the br0 interface for the host to use itself
+ allowVlansOnSelf: "true"
+ # Beginning of VLAN range to enable for use by the node itself
+ allowedVlansOnSelf: "128,129"
+ snapshot-controller:
+ enabled: true
+ replicas: 1
+ # controller image and policies
+ image:
+ repository: registry.k8s.io/sig-storage/snapshot-controller
+ pullPolicy: IfNotPresent
+ tag: "v6.3.4"
+ # A list/array of extra args that should be used
+ # when running the controller. Default args include log verbose level
+ # and leader election
+ extraArgs: []
+ # snapshot webhook config
+ webhook:
+ # all below values take effect only if webhook is enabled
+ enabled: true
+ # webhook controller image and policies
+ image:
+ # change the image if you wish to use your own custom validation server image
+ repository: registry.k8s.io/sig-storage/snapshot-validation-webhook
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v6.3.4"
+ validatingWebhook:
+ failurePolicy: Fail
+ timeoutSeconds: 2
+ # Validating webhook is exposed on an HTTPS endpoint, and so
+ # TLS certificate is required. This Helm chart relies on
+ # cert-manager.io for managing TLS certificates.
+ tls:
+ # If not empty, this issuer will be used to sign the certificate.
+ # If none is provided, a new, self-signing issuer will be created.
+ issuerRef: {}
+ # name:
+ # kind:
+ # group: cert-manager.io
+
+ # Certificate duration. The generated certificate will be automatically
+ # renewed 1/3 of `certDuration` before its expiry.
+ # Value must be in units accepted by Go time.ParseDuration.
+ # See https://golang.org/pkg/time/#ParseDuration for allowed formats.
+ # Minimum accepted duration is `1h`.
+ # This option may be ignored/overridden by some issuer types.
+ certDuration: 8760h
+ service:
+ # when running in-cluster, the webhook service is recommended to be of type ClusterIP
+ type: ClusterIP
+ port: 443
+ serviceAccount:
+ # Specifies whether a service account should be created.
+ create: true
+ # Annotations to add to the service account.
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template.
+ name: ""
+ # Log verbosity level.
+ # See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md
+ # for description of individual verbosity levels.
+ logVerbosityLevel: 2
+ podAnnotations: {}
+ resources: {}
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ nameOverride: ""
+ fullnameOverride: ""
+ imagePullSecrets: []
+ nameOverride: ""
+ fullnameOverride: ""
+ resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ # create a default volume snapshot class
+ volumeSnapshotClass:
+ create: true
+ name: "ceph-block-snapshot-class"
+ driver: "rook-ceph.rbd.csi.ceph.com"
+ # deletionPolicy determines whether a VolumeSnapshotContent created through
+ # the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ # Supported values are "Retain" and "Delete".
+ deletionPolicy: "Delete"
+ # params is a key-value map with storage driver specific parameters for creating snapshots.
+ params:
+ clusterID: rook-ceph
+ csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
+ csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
+ # key-value pair of extra labels to apply to the volumesnapshotclass
+ extraLabels:
+ velero.io/csi-volumesnapshot-class: "true"
+ # time for sleep hook in seconds
+ hooksleepTime: 12
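+ # A minimal sketch (illustrative assumption, not created by this chart) of a
+ # VolumeSnapshot that uses the snapshot class defined above; the PVC name is a placeholder:
+ #   apiVersion: snapshot.storage.k8s.io/v1
+ #   kind: VolumeSnapshot
+ #   metadata:
+ #     name: vm-disk-snapshot
+ #     namespace: default
+ #   spec:
+ #     volumeSnapshotClassName: ceph-block-snapshot-class
+ #     source:
+ #       persistentVolumeClaimName: my-vm-disk-pvc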
+ # this install cert-manager latest version if not already installed
+ cert-manager:
+ enabled: false
+ installCRDs: true
+ kubevirt:
+ enabled: true
+ # defaults to kubevirt
+ namespace: kubevirt
+ namespaceLabels:
+ pod-security.kubernetes.io/enforce: privileged
+ pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
+ replicas: 1
+ service:
+ type: ClusterIP
+ port: 443
+ targetPort: 8443
+ image:
+ repository: gcr.io/spectro-images-public/release/kubevirt/virt-operator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v1.2.0"
+ ## The Kubevirt CR that gets created
+ kubevirtResource:
+ name: kubevirt
+ useEmulation: false
+ # the gates below are required for the virtual machine orchestrator pack; users can append additional gates
+ additionalFeatureGates:
+ - LiveMigration
+ - HotplugVolumes
+ - Snapshot
+ - VMExport
+ - ExpandDisks
+ - HotplugNICs
+ - VMLiveUpdateFeatures
+ - VMPersistentState
+ - Sidecar
+ # for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
+ config:
+ evictionStrategy: "LiveMigrate"
+ # additionalConfig lets you define any configuration other than developerConfiguration and evictionStrategy
+ additionalConfig:
+ vmStateStorageClass: "ceph-filesystem"
+ # additionalDevConfig lets you define dev config other than emulation and feature gate
+ additionalDevConfig: {}
+ # vmRolloutStrategy lets you define how changes to a VM object propagate to its VMI objects
+ vmRolloutStrategy: LiveUpdate
+ certificateRotateStrategy: {}
+ customizeComponents:
+ # flags:
+ # api:
+ # v:
+ # "5"
+ # port:
+ # "8443"
+ imagePullPolicy: IfNotPresent
+ infra: {}
+ # The name of the Prometheus service account that needs read-access to KubeVirt endpoints
+ monitorAccount: "prometheus-operator-prometheus"
+ # The namespace Prometheus is deployed in
+ monitorNamespace: "monitoring"
+ # The namespace the service monitor will be deployed in. Either specify this or the monitorNamespace
+ serviceMonitorNamespace: "monitoring"
+ workloads: {}
+ workloadsUpdateStrategy:
+ workloadUpdateMethods:
+ - LiveMigrate
+ # uninstallStrategy to use, options are RemoveWorkloads, BlockUninstallIfWorkloadsExist
+ uninstallStrategy: ""
+ ingress:
+ enabled: true
+ ingressClassName: nginx
+ annotations:
+ cert-manager.io/issuer: kubevirt-selfsigned-issuer
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ labels: {}
+ hosts:
+ - host: virt-exportproxy.maas-eng.sc
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls:
+ - secretName: virt-exportproxy-tls
+ hosts:
+ - virt-exportproxy.maas-eng.sc
+ # - secretName: chart-example-tls
+ # hosts:
+ # - virt-exportproxy.maas.sc
+ cdi:
+ enabled: true
+ namespaceLabels:
+ pod-security.kubernetes.io/enforce: privileged
+ pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
+ replicas: 1
+ image:
+ repository: quay.io/kubevirt/cdi-operator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v1.58.0"
+ service:
+ type: ClusterIP
+ port: 443
+ targetPort: 8443
+ # set enabled to true and add private registry details to bring up VMs in an airgapped environment
+ privateRegistry:
+ enabled: false
+ registryIP: #Ex: 10.10.225.20
+ registryBasePath: #Ex: spectro-images
+ ## The CDI CR that gets created
+ cdiResource:
+ additionalFeatureGates:
+ # - FeatureName
+ additionalConfig:
+ podResourceRequirements:
+ requests:
+ cpu: 1
+ memory: 2G
+ limits:
+ cpu: 2
+ memory: 8G
+ filesystemOverhead:
+ global: "0.055"
+ storageClass:
+ spectro-storage-class: "0.1"
+ #insecureRegistries: [] # List of insecure registries to allow in the CDI importer, preferred in air-gapped environments
+ #importProxy:
+ # HTTPProxy: "http://username:password@your-proxy-server:3128"
+ # HTTPSProxy: "http://username:password@your-proxy-server:3128"
+ # noProxy: "127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.company.local"
+ # TrustedCAProxy: configmap-name # optional: the ConfigMap name of a user-provided trusted certificate authority (CA) bundle to be added to the importer pod CA bundle
+ ingress:
+ enabled: true
+ className: "nginx"
+ annotations:
+ cert-manager.io/issuer: cdi-selfsigned-issuer
+ nginx.ingress.kubernetes.io/proxy-body-size: "0"
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
+ nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ hosts:
+ - host: cdi-uploadproxy.maas-eng.sc
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls:
+ - secretName: cdi-uploadproxy-tls
+ hosts:
+ - cdi-uploadproxy.maas-eng.sc
+ # - secretName: chart-example-tls
+ # hosts:
+ # - cdi-uploadproxy.maas.sc
+ multus:
+ enabled: true
+ image:
+ repository: ghcr.io/k8snetworkplumbingwg/multus-cni
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v4.0.2-thick"
+ networkController:
+ criSocket:
+ enableK3SHostPath: false # true for K3S and RKE2, false for PXK-E
+ criSocketContainerPath: /host/run/containerd/containerd.sock
+ imagePullSecrets: []
+ podAnnotations: {}
+ nodeSelector: {}
+ affinity: {}
+ dpdkCompatibility: false
+ cleanup:
+ image: gcr.io/spectro-images-public/release/spectro-cleanup
+ tag: "1.0.2"
+ networkAttachDef:
+ create: false
+ # a json string to apply
+ config: ''
+ # a sample config
+ # '{
+ # "cniVersion": "0.3.0",
+ # "type": "macvlan",
+ # "master": "ens5",
+ # "mode": "bridge",
+ # "ipam": {
+ # "type": "host-local",
+ # "subnet": "192.168.1.0/24",
+ # "rangeStart": "192.168.1.200",
+ # "rangeEnd": "192.168.1.216",
+ # "routes": [
+ # { "dst": "0.0.0.0/0" }
+ # ],
+ # "gateway": "192.168.1.1"
+ # }
+ # }'
+ descheduler:
+ enabled: true
+ namespace: "kube-system"
+ # CronJob or Deployment
+ kind: CronJob
+ image:
+ repository: registry.k8s.io/descheduler/descheduler
+ # Overrides the image tag whose default is the chart version
+ tag: "v0.30.1"
+ pullPolicy: IfNotPresent
+ imagePullSecrets:
+ # - name: container-registry-secret
+ resources:
+ requests:
+ cpu: 500m
+ memory: 256Mi
+ limits:
+ cpu: 500m
+ memory: 256Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ privileged: false
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ # podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
+ podSecurityContext: {}
+ # fsGroup: 1000
+
+ nameOverride: ""
+ fullnameOverride: "descheduler"
+ # labels that'll be applied to all resources
+ commonLabels: {}
+ cronJobApiVersion: "batch/v1"
+ schedule: "*/2 * * * *"
+ suspend: false
+ # startingDeadlineSeconds: 200
+ # successfulJobsHistoryLimit: 3
+ # failedJobsHistoryLimit: 1
+ # ttlSecondsAfterFinished: 600
+ # timeZone: Etc/UTC
+
+ # Required when running as a Deployment
+ deschedulingInterval: 5m
+ # Specifies the replica count for Deployment
+ # Set leaderElection if you want to use more than 1 replica
+ # Set affinity.podAntiAffinity rule if you want to schedule onto a node
+ # only if that node is in the same zone as at least one already-running descheduler
+ replicas: 1
+ # Specifies whether Leader Election resources should be created
+ # Required when running as a Deployment
+ # NOTE: Leader election can't be activated if DryRun enabled
+ leaderElection: {}
+ # enabled: true
+ # leaseDuration: 15s
+ # renewDeadline: 10s
+ # retryPeriod: 2s
+ # resourceLock: "leases"
+ # resourceName: "descheduler"
+ # resourceNamespace: "kube-system"
+
+ command:
+ - "/bin/descheduler"
+ cmdOptions:
+ v: 3
+ # Recommended to use the latest Policy API version supported by the Descheduler app version
+ deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
+ deschedulerPolicy:
+ # nodeSelector: "key1=value1,key2=value2"
+ # maxNoOfPodsToEvictPerNode: 10
+ # maxNoOfPodsToEvictPerNamespace: 10
+ # ignorePvcPods: true
+ # evictLocalStoragePods: true
+ # evictDaemonSetPods: true
+ # tracing:
+ # collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
+ # transportCert: ""
+ # serviceName: ""
+ # serviceNamespace: ""
+ # sampleRate: 1.0
+ # fallbackToNoOpProviderOnError: true
+ profiles:
+ - name: default
+ pluginConfig:
+ - name: DefaultEvictor
+ args:
+ ignorePvcPods: true
+ evictLocalStoragePods: true
+ - name: RemoveDuplicates
+ - name: RemovePodsHavingTooManyRestarts
+ args:
+ podRestartThreshold: 100
+ includingInitContainers: true
+ - name: RemovePodsViolatingNodeAffinity
+ args:
+ nodeAffinityType:
+ - requiredDuringSchedulingIgnoredDuringExecution
+ - name: RemovePodsViolatingNodeTaints
+ - name: RemovePodsViolatingInterPodAntiAffinity
+ - name: RemovePodsViolatingTopologySpreadConstraint
+ - name: LowNodeUtilization
+ args:
+ thresholds:
+ cpu: 20
+ memory: 20
+ pods: 20
+ targetThresholds:
+ cpu: 50
+ memory: 50
+ pods: 50
+ plugins:
+ balance:
+ enabled:
+ - RemoveDuplicates
+ - RemovePodsViolatingTopologySpreadConstraint
+ - LowNodeUtilization
+ deschedule:
+ enabled:
+ - RemovePodsHavingTooManyRestarts
+ - RemovePodsViolatingNodeTaints
+ - RemovePodsViolatingNodeAffinity
+ - RemovePodsViolatingInterPodAntiAffinity
+ priorityClassName: system-cluster-critical
+ nodeSelector: {}
+ # foo: bar
+
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - descheduler
+ # topologyKey: "kubernetes.io/hostname"
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: kubernetes.io/hostname
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/name: descheduler
+ tolerations: []
+ # - key: 'management'
+ # operator: 'Equal'
+ # value: 'tool'
+ # effect: 'NoSchedule'
+
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+ # Specifies custom annotations for the serviceAccount
+ annotations: {}
+ podAnnotations: {}
+ podLabels: {}
+ dnsConfig: {}
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10258
+ scheme: HTTPS
+ initialDelaySeconds: 3
+ periodSeconds: 10
+ service:
+ enabled: false
+ # @param service.ipFamilyPolicy [string], supports SingleStack, PreferDualStack and RequireDualStack
+ #
+ ipFamilyPolicy: ""
+ # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
+ # E.g.
+ # ipFamilies:
+ # - IPv6
+ # - IPv4
+ ipFamilies: []
+ serviceMonitor:
+ enabled: false
+ # The namespace where Prometheus expects to find service monitors.
+ # namespace: ""
+ # Add custom labels to the ServiceMonitor resource
+ additionalLabels: {}
+ # prometheus: kube-prometheus-stack
+ interval: ""
+ # honorLabels: true
+ insecureSkipVerify: true
+ serverName: null
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'descheduler_(build_info|pods_evicted)'
+ # sourceLabels: [__name__]
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
diff --git a/terraform/vmo-cluster/provider.tf b/terraform/vmo-cluster/provider.tf
new file mode 100644
index 0000000..8294ea0
--- /dev/null
+++ b/terraform/vmo-cluster/provider.tf
@@ -0,0 +1,29 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+terraform {
+ required_providers {
+ spectrocloud = {
+ version = ">= 0.22.2"
+ source = "spectrocloud/spectrocloud"
+ }
+
+ tls = {
+ source = "hashicorp/tls"
+ version = "4.0.4"
+ }
+
+ local = {
+ source = "hashicorp/local"
+ version = "2.4.1"
+ }
+ }
+
+ required_version = ">= 1.9"
+}
+
+
+provider "spectrocloud" {
+ # API key set through the environment variable SPECTROCLOUD_APIKEY
+ project_name = var.palette-project
+}
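+
+# Example (illustrative): export the API key before running Terraform so the provider
+# can authenticate; the variable name is the one referenced in the comment above.
+#   export SPECTROCLOUD_APIKEY="<your-palette-api-key>"
+#   terraform init
+#   terraform plan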
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
new file mode 100644
index 0000000..9cd7a18
--- /dev/null
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -0,0 +1,26 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+#####################
+# Palette Settings
+#####################
+palette-project = "Default" # The name of your project in Palette.
+
+############################
+# MAAS Deployment Settings
+############################
+deploy-maas = false # Set to true to deploy to MAAS.
+deploy-maas-vm = false # Set to true to create a VM on the MAAS cluster once it is deployed.
+
+pcg-name = "REPLACE ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "REPLACE ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+
+maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
+maas-worker-resource-pool = "REPLACE ME" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["REPLACE ME"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["REPLACE ME"] # Provide a set of node tags for the worker nodes.
+
+maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
+maas-control-plane-resource-pool = "REPLACE ME" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["REPLACE ME"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["REPLACE ME"] # Provide a set of node tags for the control plane nodes.
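+
+# Example (illustrative values only; adjust to your MAAS environment):
+# pcg-name                  = "maas-pcg"
+# maas-domain               = "maas.sc"
+# maas-worker-resource-pool = "default"
+# maas-worker-azs           = ["az1"]
+# maas-worker-node-tags     = ["virtual"]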
diff --git a/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
new file mode 100644
index 0000000..78eaaef
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 4 - Verify PCG name, domain, resource pools, AZs and node tags cannot be empty.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = ""
+ maas-domain = ""
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = ""
+ maas-worker-azs = []
+ maas-worker-node-tags = []
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = ""
+ maas-control-plane-azs = []
+ maas-control-plane-node-tags = []
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.pcg-name,
+ var.maas-domain,
+ var.maas-worker-resource-pool,
+ var.maas-worker-azs,
+ var.maas-worker-node-tags,
+ var.maas-control-plane-resource-pool,
+ var.maas-control-plane-azs,
+ var.maas-control-plane-node-tags
+ ]
+}
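+
+# To execute this and the other test files in this folder against the mocked provider,
+# run the built-in test command (available since Terraform 1.6; this project requires >= 1.9):
+#   terraform init
+#   terraform test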
diff --git a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
new file mode 100644
index 0000000..c89a7b9
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 5 - Verify PCG name, domain, resource pools, AZs and node tags cannot have REPLACE ME values.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = "REPLACE ME"
+ maas-domain = "REPLACE ME"
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = "REPLACE ME"
+ maas-worker-azs = ["REPLACE ME"]
+ maas-worker-node-tags = ["REPLACE ME"]
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = "REPLACE ME"
+ maas-control-plane-azs = ["REPLACE ME"]
+ maas-control-plane-node-tags = ["REPLACE ME"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.pcg-name,
+ var.maas-domain,
+ var.maas-worker-resource-pool,
+ var.maas-worker-azs,
+ var.maas-worker-node-tags,
+ var.maas-control-plane-resource-pool,
+ var.maas-control-plane-azs,
+ var.maas-control-plane-node-tags
+ ]
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
new file mode 100644
index 0000000..60f9ae4
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
@@ -0,0 +1,42 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 3 - Verify MAAS profile, cluster and VM are correctly planned when values are provided.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = true
+ pcg-name = "test-pcg"
+ maas-domain = "test-domain"
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = "test-worker-pool"
+ maas-worker-azs = ["test-worker-az"]
+ maas-worker-node-tags = ["test-worker-tags"]
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = "test-cp-pool"
+ maas-control-plane-azs = ["test-cp-az"]
+ maas-control-plane-node-tags = ["test-cp-tags"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ assert {
+ condition = length(spectrocloud_cluster_profile.maas-vmo-profile) == 1
+ error_message = "No MAAS cluster profile was created"
+ }
+
+ assert {
+ condition = length(spectrocloud_cluster_maas.maas-cluster) == 1
+ error_message = "No MAAS cluster was created"
+ }
+
+ assert {
+ condition = length(spectrocloud_virtual_machine.virtual-machine) == 1
+ error_message = "No MAAS VM was created"
+ }
+
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
new file mode 100644
index 0000000..3d24108
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
@@ -0,0 +1,31 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 6 - Verify control plane and worker nodes cannot be set to 0.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = "test-pcg"
+ maas-domain = "test-domain"
+ maas-worker-nodes = 0
+ maas-worker-resource-pool = "test-worker-pool"
+ maas-worker-azs = ["test-worker-az"]
+ maas-worker-node-tags = ["test-worker-tags"]
+ maas-control-plane-nodes = 0
+ maas-control-plane-resource-pool = "test-cp-pool"
+ maas-control-plane-azs = ["test-cp-az"]
+ maas-control-plane-node-tags = ["test-cp-tags"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.maas-worker-nodes,
+ var.maas-control-plane-nodes
+ ]
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
new file mode 100644
index 0000000..6b2b426
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 2 - Verify MAAS profile and cluster are correctly planned when values are provided.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = "test-pcg"
+ maas-domain = "test-domain"
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = "test-worker-pool"
+ maas-worker-azs = ["test-worker-az"]
+ maas-worker-node-tags = ["test-worker-tags"]
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = "test-cp-pool"
+ maas-control-plane-azs = ["test-cp-az"]
+ maas-control-plane-node-tags = ["test-cp-tags"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ assert {
+ condition = length(spectrocloud_cluster_profile.maas-vmo-profile) == 1
+ error_message = "No MAAS cluster profile was created"
+ }
+
+ assert {
+ condition = length(spectrocloud_cluster_maas.maas-cluster) == 1
+ error_message = "No MAAS cluster was created"
+ }
+
+}
diff --git a/terraform/vmo-cluster/tests/project-palette.tftest.hcl b/terraform/vmo-cluster/tests/project-palette.tftest.hcl
new file mode 100644
index 0000000..49de946
--- /dev/null
+++ b/terraform/vmo-cluster/tests/project-palette.tftest.hcl
@@ -0,0 +1,16 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 1 - Verify the Palette project variable does not allow an empty value.
+
+variables {
+ palette-project = ""
+}
+
+run "project_variable" {
+
+ command = plan
+
+ expect_failures = [
+ var.palette-project
+ ]
+}
diff --git a/terraform/vmo-cluster/virtual-machines/cloud-init b/terraform/vmo-cluster/virtual-machines/cloud-init
new file mode 100644
index 0000000..935c9a1
--- /dev/null
+++ b/terraform/vmo-cluster/virtual-machines/cloud-init
@@ -0,0 +1,22 @@
+#cloud-config
+ssh_pwauth: True
+chpasswd: { expire: False }
+password: spectro
+disable_root: false
+runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
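+# Note (assumption): on stock Ubuntu cloud images the default 'ubuntu' user, added to the
+# docker group above, can log in with the password set above once ssh_pwauth is enabled.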
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
new file mode 100644
index 0000000..9a07542
--- /dev/null
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -0,0 +1,106 @@
+
+##########################
+# MAAS Virtual Machine
+##########################
+resource "spectrocloud_virtual_machine" "virtual-machine" {
+ count = var.deploy-maas-vm ? 1 : 0
+ depends_on = [spectrocloud_cluster_maas.maas-cluster]
+
+ cluster_uid = data.spectrocloud_cluster.maas_vmo_cluster[0].id
+ cluster_context = data.spectrocloud_cluster.maas_vmo_cluster[0].context
+
+ run_on_launch = true
+ namespace = "default"
+ name = "ubuntu-tutorial-vm"
+
+ timeouts {
+ create = "60m"
+ }
+
+ labels = {
+ "tf" = "spectrocloud-tutorials"
+ "kubevirt.io/vm" = "ubuntu-tutorial-vm"
+ }
+
+ data_volume_templates {
+ metadata {
+ name = "ubuntu-tutorial-vm"
+ }
+ spec {
+ source {
+ pvc {
+ name = "template-ubuntu-2204"
+ namespace = "vmo-golden-images"
+ }
+ }
+ pvc {
+ access_modes = ["ReadWriteMany"]
+ resources {
+ requests = {
+ storage = "50Gi"
+ }
+ }
+ storage_class_name = "ceph-block"
+ volume_mode = "Block"
+ }
+ }
+ }
+
+ volume {
+ name = "ubuntu-tutorial-vm"
+ volume_source {
+ data_volume {
+ name = "ubuntu-tutorial-vm"
+ }
+ }
+ }
+
+ volume {
+ name = "cloudinitdisk"
+ volume_source {
+ cloud_init_no_cloud {
+ user_data = file("virtual-machines/cloud-init")
+ }
+ }
+ }
+
+ disk {
+ name = "ubuntu-tutorial-vm"
+ disk_device {
+ disk {
+ bus = "virtio"
+ }
+ }
+ }
+ disk {
+ name = "cloudinitdisk"
+ disk_device {
+ disk {
+ bus = "virtio"
+ }
+ }
+ }
+
+ cpu {
+ cores = 2
+ sockets = 1
+ threads = 1
+ }
+ memory {
+ guest = "4Gi"
+ }
+
+ resources {}
+
+ interface {
+ name = "default"
+ interface_binding_method = "InterfaceMasquerade"
+ }
+
+ network {
+ name = "default"
+ network_source {
+ pod {}
+ }
+ }
+}
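+
+# Example (illustrative): with the MAAS cluster already deployed, enable the VM toggle
+# and apply to create this virtual machine.
+#   terraform apply -var="deploy-maas=true" -var="deploy-maas-vm=true"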