From e9e9d568688f63c9d6764ec61a2f1a4b494a1d14 Mon Sep 17 00:00:00 2001 From: Muse Mulatu Date: Thu, 10 Oct 2024 10:37:57 -0600 Subject: [PATCH] feat: create module for creating kubernetes cluster --- .../gpu-cluster/infrastructure/workspace.yaml | 2 + .../infrastructure/workspace.yaml | 2 + .../civo/modules/civo-cluster/main.tf | 222 +++++++++++++++++ .../civo/modules/civo-cluster/output.tf | 7 + .../civo/modules/civo-cluster/provider.tf | 8 + .../civo/modules/civo-cluster/variables.tf | 32 +++ .../civo/modules/workload-cluster/main.tf | 233 +----------------- .../modules/workload-cluster/variables.tf | 5 + .../gpu.tf | 11 +- .../civo/modules/workload-gpu-cluster/main.tf | 34 +++ .../modules/workload-gpu-cluster/provider.tf | 8 + .../modules/workload-gpu-cluster/variables.tf | 24 ++ 12 files changed, 359 insertions(+), 229 deletions(-) create mode 100644 civo-github/terraform/civo/modules/civo-cluster/main.tf create mode 100644 civo-github/terraform/civo/modules/civo-cluster/output.tf create mode 100644 civo-github/terraform/civo/modules/civo-cluster/provider.tf create mode 100644 civo-github/terraform/civo/modules/civo-cluster/variables.tf rename civo-github/terraform/civo/modules/{workload-cluster => workload-gpu-cluster}/gpu.tf (95%) create mode 100644 civo-github/terraform/civo/modules/workload-gpu-cluster/main.tf create mode 100644 civo-github/terraform/civo/modules/workload-gpu-cluster/provider.tf create mode 100644 civo-github/terraform/civo/modules/workload-gpu-cluster/variables.tf diff --git a/civo-github/templates/gpu-cluster/infrastructure/workspace.yaml b/civo-github/templates/gpu-cluster/infrastructure/workspace.yaml index e5835a10a..9dd93a84d 100644 --- a/civo-github/templates/gpu-cluster/infrastructure/workspace.yaml +++ b/civo-github/templates/gpu-cluster/infrastructure/workspace.yaml @@ -19,3 +19,5 @@ spec: value: "" - key: node_type value: + - key: cluster_type + value: "talos" diff --git 
a/civo-github/templates/workload-cluster/infrastructure/workspace.yaml b/civo-github/templates/workload-cluster/infrastructure/workspace.yaml index e5835a10a..1e28421e9 100644 --- a/civo-github/templates/workload-cluster/infrastructure/workspace.yaml +++ b/civo-github/templates/workload-cluster/infrastructure/workspace.yaml @@ -19,3 +19,5 @@ spec: value: "" - key: node_type value: + - key: cluster_type + value: "k3s" \ No newline at end of file diff --git a/civo-github/terraform/civo/modules/civo-cluster/main.tf b/civo-github/terraform/civo/modules/civo-cluster/main.tf new file mode 100644 index 000000000..e34f464ba --- /dev/null +++ b/civo-github/terraform/civo/modules/civo-cluster/main.tf @@ -0,0 +1,222 @@ +resource "civo_network" "kubefirst" { + label = var.cluster_name +} + +resource "civo_firewall" "kubefirst" { + name = var.cluster_name + network_id = civo_network.kubefirst.id + create_default_rules = true +} + +resource "civo_kubernetes_cluster" "kubefirst" { + name = var.cluster_name + network_id = civo_network.kubefirst.id + firewall_id = civo_firewall.kubefirst.id + write_kubeconfig = true + cluster_type = var.cluster_type + pools { + label = var.cluster_name + size = var.node_type + node_count = var.node_count + labels = var.labels + } +} + +resource "vault_generic_secret" "clusters" { + path = "secret/clusters/${var.cluster_name}" + + data_json = jsonencode( + { + kubeconfig = civo_kubernetes_cluster.kubefirst.kubeconfig + client_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-certificate-data) + client_key = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-key-data) + cluster_ca_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).clusters[0].cluster.certificate-authority-data) + host = civo_kubernetes_cluster.kubefirst.api_endpoint + cluster_name = var.cluster_name + argocd_manager_sa_token = 
kubernetes_secret_v1.argocd_manager.data.token + } + ) +} + +provider "kubernetes" { + host = civo_kubernetes_cluster.kubefirst.api_endpoint + client_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-certificate-data) + client_key = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-key-data) + cluster_ca_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).clusters[0].cluster.certificate-authority-data) +} + +provider "helm" { + repository_config_path = "${path.module}/.helm/repositories.yaml" + repository_cache = "${path.module}/.helm" + kubernetes { + host = civo_kubernetes_cluster.kubefirst.api_endpoint + client_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-certificate-data) + client_key = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-key-data) + cluster_ca_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).clusters[0].cluster.certificate-authority-data) + } +} + +resource "kubernetes_cluster_role_v1" "argocd_manager" { + metadata { + name = "argocd-manager-role" + } + + rule { + api_groups = ["*"] + resources = ["*"] + verbs = ["*"] + } + rule { + non_resource_urls = ["*"] + verbs = ["*"] + } +} + + +resource "kubernetes_cluster_role_binding_v1" "argocd_manager" { + metadata { + name = "argocd-manager-role-binding" + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = kubernetes_cluster_role_v1.argocd_manager.metadata.0.name + } + subject { + kind = "ServiceAccount" + name = kubernetes_service_account_v1.argocd_manager.metadata.0.name + namespace = "kube-system" + } +} + +resource "kubernetes_service_account_v1" "argocd_manager" { + metadata { + name = "argocd-manager" + namespace = "kube-system" + } + secret { + name = "argocd-manager-token" + } +} + +resource 
"kubernetes_secret_v1" "argocd_manager" { + metadata { + name = "argocd-manager-token" + namespace = "kube-system" + annotations = { + "kubernetes.io/service-account.name" = "argocd-manager" + } + } + type = "kubernetes.io/service-account-token" + depends_on = [kubernetes_service_account_v1.argocd_manager] +} + +resource "kubernetes_namespace_v1" "external_dns" { + metadata { + name = "external-dns" + } +} + +data "vault_generic_secret" "external_dns" { + path = "secret/external-dns" +} + +resource "kubernetes_secret_v1" "external_dns" { + metadata { + name = "external-dns-secrets" + namespace = kubernetes_namespace_v1.external_dns.metadata.0.name + } + data = { + token = data.vault_generic_secret.external_dns.data["token"] + } + type = "Opaque" +} + + +resource "kubernetes_namespace_v1" "external_secrets_operator" { + metadata { + name = "external-secrets-operator" + } +} + +resource "kubernetes_namespace_v1" "environment" { + metadata { + name = var.cluster_name + } +} + +data "vault_generic_secret" "docker_config" { + path = "secret/dockerconfigjson" +} + +resource "kubernetes_secret_v1" "image_pull" { + metadata { + name = "docker-config" + namespace = kubernetes_namespace_v1.environment.metadata.0.name + } + + data = { + ".dockerconfigjson" = data.vault_generic_secret.docker_config.data["dockerconfig"] + } + + type = "kubernetes.io/dockerconfigjson" +} + +data "vault_generic_secret" "external_secrets_operator" { + path = "secret/atlantis" +} + +resource "kubernetes_secret_v1" "external_secrets_operator_environment" { + metadata { + name = "${var.cluster_name}-cluster-vault-bootstrap" + namespace = kubernetes_namespace_v1.environment.metadata.0.name + } + data = { + vault-token = data.vault_generic_secret.external_secrets_operator.data["VAULT_TOKEN"] + } + type = "Opaque" +} + +resource "kubernetes_secret_v1" "external_secrets_operator" { + metadata { + name = "${var.cluster_name}-cluster-vault-bootstrap" + namespace = 
kubernetes_namespace_v1.external_secrets_operator.metadata.0.name + } + data = { + vault-token = data.vault_generic_secret.external_secrets_operator.data["VAULT_TOKEN"] + } + type = "Opaque" +} + +resource "kubernetes_service_account_v1" "external_secrets" { + metadata { + name = "external-secrets" + namespace = kubernetes_namespace_v1.external_secrets_operator.metadata.0.name + } + secret { + name = "external-secrets-token" + } +} + +resource "kubernetes_secret_v1" "external_secrets" { + metadata { + name = "external-secrets-token" + namespace = kubernetes_namespace_v1.external_secrets_operator.metadata.0.name + annotations = { + "kubernetes.io/service-account.name" = "external-secrets" + } + } + type = "kubernetes.io/service-account-token" + depends_on = [kubernetes_service_account_v1.external_secrets] +} + +resource "kubernetes_config_map" "kubefirst_cm" { + metadata { + name = "kubefirst-cm" + namespace = "kube-system" + } + + data = { + mgmt_cluster_id = var.mgmt_cluster_id + } +} diff --git a/civo-github/terraform/civo/modules/civo-cluster/output.tf b/civo-github/terraform/civo/modules/civo-cluster/output.tf new file mode 100644 index 000000000..7307b0ead --- /dev/null +++ b/civo-github/terraform/civo/modules/civo-cluster/output.tf @@ -0,0 +1,7 @@ +output "kubeconfig" { + value = civo_kubernetes_cluster.kubefirst.kubeconfig +} + +output "api_endpoint" { + value = civo_kubernetes_cluster.kubefirst.api_endpoint +} \ No newline at end of file diff --git a/civo-github/terraform/civo/modules/civo-cluster/provider.tf b/civo-github/terraform/civo/modules/civo-cluster/provider.tf new file mode 100644 index 000000000..d1bdb3e56 --- /dev/null +++ b/civo-github/terraform/civo/modules/civo-cluster/provider.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + civo = { + source = "civo/civo" + version = "~> 1.1.0" + } + } +} \ No newline at end of file diff --git a/civo-github/terraform/civo/modules/civo-cluster/variables.tf 
b/civo-github/terraform/civo/modules/civo-cluster/variables.tf new file mode 100644 index 000000000..7aa52b987 --- /dev/null +++ b/civo-github/terraform/civo/modules/civo-cluster/variables.tf @@ -0,0 +1,32 @@ +variable "cluster_name" { + type = string +} + +variable "cluster_region" { + type = string +} + +variable "environment" { + type = string +} + +variable "node_count" { + type = number +} + +variable "node_type" { + type = string +} + +variable "cluster_type" { + type = string + description = "type of cluster talos/k3s" +} + +variable "labels" { + type = map(string) +} + +variable "mgmt_cluster_id" { + type = string +} \ No newline at end of file diff --git a/civo-github/terraform/civo/modules/workload-cluster/main.tf b/civo-github/terraform/civo/modules/workload-cluster/main.tf index c63d811b9..12aa27a2d 100644 --- a/civo-github/terraform/civo/modules/workload-cluster/main.tf +++ b/civo-github/terraform/civo/modules/workload-cluster/main.tf @@ -1,224 +1,11 @@ -resource "civo_network" "kubefirst" { - label = var.cluster_name -} - -resource "civo_firewall" "kubefirst" { - name = var.cluster_name - network_id = civo_network.kubefirst.id - create_default_rules = true -} - -resource "civo_kubernetes_cluster" "kubefirst" { - name = var.cluster_name - network_id = civo_network.kubefirst.id - firewall_id = civo_firewall.kubefirst.id - write_kubeconfig = true - cluster_type = local.is_gpu ? "talos" : "k3s" # k3s doesn't support GPU - pools { - label = var.cluster_name - size = var.node_type - node_count = var.node_count - labels = local.is_gpu ? 
{ - "nvidia.com/gpu.deploy.operator-validator" = "false" - } : {} - } -} - -resource "vault_generic_secret" "clusters" { - path = "secret/clusters/${var.cluster_name}" - - data_json = jsonencode( - { - kubeconfig = civo_kubernetes_cluster.kubefirst.kubeconfig - client_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-certificate-data) - client_key = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-key-data) - cluster_ca_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).clusters[0].cluster.certificate-authority-data) - host = civo_kubernetes_cluster.kubefirst.api_endpoint - cluster_name = var.cluster_name - argocd_manager_sa_token = kubernetes_secret_v1.argocd_manager.data.token - } - ) -} - -provider "kubernetes" { - host = civo_kubernetes_cluster.kubefirst.api_endpoint - client_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-certificate-data) - client_key = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-key-data) - cluster_ca_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).clusters[0].cluster.certificate-authority-data) -} - -provider "helm" { - repository_config_path = "${path.module}/.helm/repositories.yaml" - repository_cache = "${path.module}/.helm" - kubernetes { - host = civo_kubernetes_cluster.kubefirst.api_endpoint - client_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-certificate-data) - client_key = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).users[0].user.client-key-data) - cluster_ca_certificate = base64decode(yamldecode(civo_kubernetes_cluster.kubefirst.kubeconfig).clusters[0].cluster.certificate-authority-data) - } -} - -resource "kubernetes_cluster_role_v1" "argocd_manager" { - metadata { - name = 
"argocd-manager-role" - } - - rule { - api_groups = ["*"] - resources = ["*"] - verbs = ["*"] - } - rule { - non_resource_urls = ["*"] - verbs = ["*"] - } -} - - -resource "kubernetes_cluster_role_binding_v1" "argocd_manager" { - metadata { - name = "argocd-manager-role-binding" - } - role_ref { - api_group = "rbac.authorization.k8s.io" - kind = "ClusterRole" - name = kubernetes_cluster_role_v1.argocd_manager.metadata.0.name - } - subject { - kind = "ServiceAccount" - name = kubernetes_service_account_v1.argocd_manager.metadata.0.name - namespace = "kube-system" - } -} - -resource "kubernetes_service_account_v1" "argocd_manager" { - metadata { - name = "argocd-manager" - namespace = "kube-system" - } - secret { - name = "argocd-manager-token" - } -} - -resource "kubernetes_secret_v1" "argocd_manager" { - metadata { - name = "argocd-manager-token" - namespace = "kube-system" - annotations = { - "kubernetes.io/service-account.name" = "argocd-manager" - } - } - type = "kubernetes.io/service-account-token" - depends_on = [kubernetes_service_account_v1.argocd_manager] -} - -resource "kubernetes_namespace_v1" "external_dns" { - metadata { - name = "external-dns" - } -} - -data "vault_generic_secret" "external_dns" { - path = "secret/external-dns" -} - -resource "kubernetes_secret_v1" "external_dns" { - metadata { - name = "external-dns-secrets" - namespace = kubernetes_namespace_v1.external_dns.metadata.0.name - } - data = { - token = data.vault_generic_secret.external_dns.data["token"] - } - type = "Opaque" -} - - -resource "kubernetes_namespace_v1" "external_secrets_operator" { - metadata { - name = "external-secrets-operator" - } -} - -resource "kubernetes_namespace_v1" "environment" { - metadata { - name = var.cluster_name - } -} - -data "vault_generic_secret" "docker_config" { - path = "secret/dockerconfigjson" -} - -resource "kubernetes_secret_v1" "image_pull" { - metadata { - name = "docker-config" - namespace = kubernetes_namespace_v1.environment.metadata.0.name 
- } - - data = { - ".dockerconfigjson" = data.vault_generic_secret.docker_config.data["dockerconfig"] - } - - type = "kubernetes.io/dockerconfigjson" -} - -data "vault_generic_secret" "external_secrets_operator" { - path = "secret/atlantis" -} - -resource "kubernetes_secret_v1" "external_secrets_operator_environment" { - metadata { - name = "${var.cluster_name}-cluster-vault-bootstrap" - namespace = kubernetes_namespace_v1.environment.metadata.0.name - } - data = { - vault-token = data.vault_generic_secret.external_secrets_operator.data["VAULT_TOKEN"] - } - type = "Opaque" -} - -resource "kubernetes_secret_v1" "external_secrets_operator" { - metadata { - name = "${var.cluster_name}-cluster-vault-bootstrap" - namespace = kubernetes_namespace_v1.external_secrets_operator.metadata.0.name - } - data = { - vault-token = data.vault_generic_secret.external_secrets_operator.data["VAULT_TOKEN"] - } - type = "Opaque" -} - -resource "kubernetes_service_account_v1" "external_secrets" { - metadata { - name = "external-secrets" - namespace = kubernetes_namespace_v1.external_secrets_operator.metadata.0.name - } - secret { - name = "external-secrets-token" - } -} - -resource "kubernetes_secret_v1" "external_secrets" { - metadata { - name = "external-secrets-token" - namespace = kubernetes_namespace_v1.external_secrets_operator.metadata.0.name - annotations = { - "kubernetes.io/service-account.name" = "external-secrets" - } - } - type = "kubernetes.io/service-account-token" - depends_on = [kubernetes_service_account_v1.external_secrets] -} - -resource "kubernetes_config_map" "kubefirst_cm" { - metadata { - name = "kubefirst-cm" - namespace = "kube-system" - } - - data = { - mgmt_cluster_id = "" - } +module "civo_kubernetes_cluster" { + source = "../civo-cluster" + cluster_name = var.cluster_name + cluster_region = var.cluster_region + environment = var.environment + node_count = var.node_count + node_type = var.node_type + cluster_type = var.cluster_type + labels = 
{} + mgmt_cluster_id = "" } diff --git a/civo-github/terraform/civo/modules/workload-cluster/variables.tf b/civo-github/terraform/civo/modules/workload-cluster/variables.tf index 7a7c18c00..e565eec53 100644 --- a/civo-github/terraform/civo/modules/workload-cluster/variables.tf +++ b/civo-github/terraform/civo/modules/workload-cluster/variables.tf @@ -17,3 +17,8 @@ variable "node_count" { variable "node_type" { type = string } + +variable "cluster_type" { + type = string + description = "type of cluster talos/k3s" +} diff --git a/civo-github/terraform/civo/modules/workload-cluster/gpu.tf b/civo-github/terraform/civo/modules/workload-gpu-cluster/gpu.tf similarity index 95% rename from civo-github/terraform/civo/modules/workload-cluster/gpu.tf rename to civo-github/terraform/civo/modules/workload-gpu-cluster/gpu.tf index c4b25480d..dc827a719 100644 --- a/civo-github/terraform/civo/modules/workload-cluster/gpu.tf +++ b/civo-github/terraform/civo/modules/workload-gpu-cluster/gpu.tf @@ -21,22 +21,21 @@ data "civo_size" "gpu" { # Create labels for the GPU operator namespace resource "kubernetes_namespace_v1" "gpu_operator_labels" { - count = local.is_gpu ? 1 : 0 metadata { name = "gpu-operator" labels = { "pod-security.kubernetes.io/enforce" = "privileged" } } + depends_on = [module.civo_kubernetes_cluster] } # Helm release configuration for the Nvidia GPU operator resource "helm_release" "gpu_operator" { - count = local.is_gpu ? 
1 : 0 name = "gpu-operator" repository = "https://helm.ngc.nvidia.com/nvidia" chart = "gpu-operator" - namespace = kubernetes_namespace_v1.gpu_operator_labels[count.index].metadata[0].name + namespace = kubernetes_namespace_v1.gpu_operator_labels.metadata[0].name version = "v24.6.0" atomic = true cleanup_on_fail = true @@ -212,15 +211,15 @@ resource "helm_release" "gpu_operator" { } }) ] + depends_on = [module.civo_kubernetes_cluster] } # as the host driver and the nvidia container toolkit are provided within Talos as Shims, # we need to create a daemonset that forces these to be marked as ready for the GPU operator # TODO: "productionise" this resource "kubernetes_daemonset" "fake_toolkit_ready" { - count = local.is_gpu ? 1 : 0 metadata { name = "fake-toolkit-ready" - namespace = kubernetes_namespace_v1.gpu_operator_labels[count.index].metadata[0].name + namespace = kubernetes_namespace_v1.gpu_operator_labels.metadata[0].name } spec { selector { @@ -286,5 +285,5 @@ resource "kubernetes_daemonset" "fake_toolkit_ready" { } } } - depends_on = [helm_release.gpu_operator] + depends_on = [helm_release.gpu_operator, module.civo_kubernetes_cluster] } diff --git a/civo-github/terraform/civo/modules/workload-gpu-cluster/main.tf b/civo-github/terraform/civo/modules/workload-gpu-cluster/main.tf new file mode 100644 index 000000000..b2f2f1668 --- /dev/null +++ b/civo-github/terraform/civo/modules/workload-gpu-cluster/main.tf @@ -0,0 +1,34 @@ +module "civo_kubernetes_cluster" { + source = "../civo-cluster" + cluster_name = var.cluster_name + cluster_region = var.cluster_region + environment = var.environment + node_count = var.node_count + node_type = var.node_type + cluster_type = var.cluster_type + labels = { + "nvidia.com/gpu.deploy.operator-validator" = "false" + } + mgmt_cluster_id = "" +} + + +// TODO Fix: I don't like the idea we are configuring the provider here and inside the module + +provider "kubernetes" { + host = 
module.civo_kubernetes_cluster.api_endpoint + client_certificate = base64decode(yamldecode(module.civo_kubernetes_cluster.kubeconfig).users[0].user.client-certificate-data) + client_key = base64decode(yamldecode(module.civo_kubernetes_cluster.kubeconfig).users[0].user.client-key-data) + cluster_ca_certificate = base64decode(yamldecode(module.civo_kubernetes_cluster.kubeconfig).clusters[0].cluster.certificate-authority-data) +} + +provider "helm" { + repository_config_path = "${path.module}/.helm/repositories.yaml" + repository_cache = "${path.module}/.helm" + kubernetes { + host = module.civo_kubernetes_cluster.api_endpoint + client_certificate = base64decode(yamldecode(module.civo_kubernetes_cluster.kubeconfig).users[0].user.client-certificate-data) + client_key = base64decode(yamldecode(module.civo_kubernetes_cluster.kubeconfig).users[0].user.client-key-data) + cluster_ca_certificate = base64decode(yamldecode(module.civo_kubernetes_cluster.kubeconfig).clusters[0].cluster.certificate-authority-data) + } +} \ No newline at end of file diff --git a/civo-github/terraform/civo/modules/workload-gpu-cluster/provider.tf b/civo-github/terraform/civo/modules/workload-gpu-cluster/provider.tf new file mode 100644 index 000000000..93ce6c8d4 --- /dev/null +++ b/civo-github/terraform/civo/modules/workload-gpu-cluster/provider.tf @@ -0,0 +1,8 @@ +terraform { + required_providers { + civo = { + source = "civo/civo" + version = "~> 1.1.0" + } + } +} \ No newline at end of file diff --git a/civo-github/terraform/civo/modules/workload-gpu-cluster/variables.tf b/civo-github/terraform/civo/modules/workload-gpu-cluster/variables.tf new file mode 100644 index 000000000..e565eec53 --- /dev/null +++ b/civo-github/terraform/civo/modules/workload-gpu-cluster/variables.tf @@ -0,0 +1,24 @@ +variable "cluster_name" { + type = string +} + +variable "cluster_region" { + type = string +} + +variable "environment" { + type = string +} + +variable "node_count" { + type = number +} + +variable 
"node_type" { + type = string +} + +variable "cluster_type" { + type = string + description = "type of cluster talos/k3s" +}