diff --git a/GNUmakefile b/GNUmakefile index 660de6637c..4f2e07573d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -33,6 +33,18 @@ depscheck: @git diff --exit-code -- vendor || \ (echo; echo "Unexpected difference in vendor/ directory. Run 'go mod vendor' command or revert any go.mod/go.sum/vendor changes and commit."; exit 1) +examples-lint: tools + @echo "==> Checking _examples dir formatting..." + @./scripts/fmt-examples.sh || (echo; \ + echo "Terraform formatting errors found in _examples dir."; \ + echo "To see the full differences, run: ./scripts/fmt-examples.sh diff"; \ + echo "To automatically fix the formatting, run 'make examples-lint-fix' and commit the changes."; \ + exit 1) + +examples-lint-fix: tools + @echo "==> Fixing terraform formatting of _examples dir..." + @./scripts/fmt-examples.sh fix + fmt: gofmt -w $(GOFMT_FILES) diff --git a/_examples/aks/README.md b/_examples/aks/README.md new file mode 100644 index 0000000000..5340fcc574 --- /dev/null +++ b/_examples/aks/README.md @@ -0,0 +1,60 @@ +# AKS (Azure Kubernetes Service) + +This example shows how to use the Terraform Kubernetes Provider and Terraform Helm Provider to configure an AKS cluster. The example config in this directory builds the AKS cluster and applies the Kubernetes configurations in a single operation. This guide will also show you how to make changes to the underlying AKS cluster in such a way that Kubernetes/Helm resources are recreated after the underlying cluster is replaced. + +You will need the following environment variables to be set: + + - `ARM_SUBSCRIPTION_ID` + - `ARM_TENANT_ID` + - `ARM_CLIENT_ID` + - `ARM_CLIENT_SECRET` + +Ensure that `KUBE_CONFIG_FILE` and `KUBE_CONFIG_FILES` environment variables are NOT set, as they will interfere with the cluster build. + +``` +unset KUBE_CONFIG_FILE +unset KUBE_CONFIG_FILES +``` + +To install the AKS cluster using default values, run terraform init and apply from the directory containing this README. 
+ +``` +terraform init +terraform apply +``` + +## Kubeconfig for manual CLI access + +This example generates a kubeconfig file in the current working directory, which can be used for manual CLI access to the cluster. + +``` +export KUBECONFIG=$(terraform output -raw kubeconfig_path) +kubectl get pods -n test +``` + +However, in a real-world scenario, this config file would have to be replaced periodically as the AKS client certificates eventually expire (see the [Azure documentation](https://docs.microsoft.com/en-us/azure/aks/certificate-rotation) for the exact expiry dates). If the certificates (or other authentication attributes) are replaced, run a targeted `terraform apply` to save the new credentials into state. + +``` +terraform plan -target=module.aks-cluster +terraform apply -target=module.aks-cluster +``` + +Once the targeted apply is finished, the Kubernetes and Helm providers will be available for use again. Run `terraform apply` again (without targeting) to apply any updates to Kubernetes resources. + +``` +terraform plan +terraform apply +``` + +This approach prevents the Kubernetes and Helm providers from attempting to use cached, invalid credentials, which would cause provider configuration errors during the plan and apply phases. + +## Replacing the AKS cluster and re-creating the Kubernetes / Helm resources + +When the cluster is initially created, the Kubernetes and Helm providers will not be initialized until authentication details are created for the cluster. However, for future operations that may involve replacing the underlying cluster (for example, changing VM sizes), the AKS cluster will have to be targeted without the Kubernetes/Helm providers, as shown below. This is done by removing the `module.kubernetes-config` from Terraform State prior to replacing cluster credentials, to avoid passing outdated credentials into the providers. + +This will create the new cluster and the Kubernetes resources in a single apply. 
+ +``` +terraform state rm module.kubernetes-config +terraform apply +``` diff --git a/_examples/aks/aks-cluster/main.tf b/_examples/aks/aks-cluster/main.tf new file mode 100644 index 0000000000..1a0648910b --- /dev/null +++ b/_examples/aks/aks-cluster/main.tf @@ -0,0 +1,27 @@ +resource "azurerm_resource_group" "test" { + name = var.cluster_name + location = var.location +} + +resource "azurerm_kubernetes_cluster" "test" { + name = var.cluster_name + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = var.cluster_name + + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + + identity { + type = "SystemAssigned" + } +} + +resource "local_file" "kubeconfig" { + content = azurerm_kubernetes_cluster.test.kube_config_raw + filename = "${path.root}/kubeconfig" +} + diff --git a/_examples/aks/aks-cluster/output.tf b/_examples/aks/aks-cluster/output.tf new file mode 100644 index 0000000000..9bb8518983 --- /dev/null +++ b/_examples/aks/aks-cluster/output.tf @@ -0,0 +1,15 @@ +output "client_cert" { + value = azurerm_kubernetes_cluster.test.kube_config.0.client_certificate +} + +output "client_key" { + value = azurerm_kubernetes_cluster.test.kube_config.0.client_key +} + +output "ca_cert" { + value = azurerm_kubernetes_cluster.test.kube_config.0.cluster_ca_certificate +} + +output "endpoint" { + value = azurerm_kubernetes_cluster.test.kube_config.0.host +} diff --git a/_examples/aks/aks-cluster/variables.tf b/_examples/aks/aks-cluster/variables.tf new file mode 100644 index 0000000000..0f37fe571c --- /dev/null +++ b/_examples/aks/aks-cluster/variables.tf @@ -0,0 +1,15 @@ +variable "kubernetes_version" { + default = "1.18" +} + +variable "workers_count" { + default = "3" +} + +variable "cluster_name" { + type = string +} + +variable "location" { + type = string +} diff --git a/_examples/aks/kubernetes-config/main.tf b/_examples/aks/kubernetes-config/main.tf new file mode 
100644 index 0000000000..3eeabc804d --- /dev/null +++ b/_examples/aks/kubernetes-config/main.tf @@ -0,0 +1,56 @@ +resource "kubernetes_namespace" "test" { + metadata { + name = "test" + } +} + +resource "kubernetes_deployment" "test" { + metadata { + name = "test" + namespace= kubernetes_namespace.test.metadata.0.name + } + spec { + replicas = 2 + selector { + match_labels = { + app = "test" + } + } + template { + metadata { + labels = { + app = "test" + } + } + spec { + container { + image = "nginx:1.19.4" + name = "nginx" + + resources { + limits = { + memory = "512M" + cpu = "1" + } + requests = { + memory = "256M" + cpu = "50m" + } + } + } + } + } + } +} + +resource helm_release nginx_ingress { + name = "nginx-ingress-controller" + + repository = "https://charts.bitnami.com/bitnami" + chart = "nginx-ingress-controller" + + set { + name = "service.type" + value = "ClusterIP" + } +} diff --git a/_examples/aks/kubernetes-config/variables.tf b/_examples/aks/kubernetes-config/variables.tf new file mode 100644 index 0000000000..abbf86f798 --- /dev/null +++ b/_examples/aks/kubernetes-config/variables.tf @@ -0,0 +1,3 @@ +variable "cluster_name" { + type = string +} diff --git a/_examples/aks/main.tf b/_examples/aks/main.tf new file mode 100644 index 0000000000..91e08dfaac --- /dev/null +++ b/_examples/aks/main.tf @@ -0,0 +1,50 @@ +terraform { + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + azurerm = { + source = "hashicorp/azurerm" + version = "2.42" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.0.1" + } + } +} + +provider "kubernetes" { + host = module.aks-cluster.endpoint + client_key = base64decode(module.aks-cluster.client_key) + client_certificate = base64decode(module.aks-cluster.client_cert) + cluster_ca_certificate = base64decode(module.aks-cluster.ca_cert) +} + +provider "helm" { + kubernetes { + host = module.aks-cluster.endpoint + client_key = base64decode(module.aks-cluster.client_key) + 
client_certificate = base64decode(module.aks-cluster.client_cert) + cluster_ca_certificate = base64decode(module.aks-cluster.ca_cert) + } +} + +provider "azurerm" { + features {} +} + +module "aks-cluster" { + providers = { azurerm = azurerm } + source = "./aks-cluster" + cluster_name = local.cluster_name + location = var.location +} + +module "kubernetes-config" { + providers = { kubernetes = kubernetes, helm = helm } + depends_on = [module.aks-cluster] + source = "./kubernetes-config" + cluster_name = local.cluster_name +} diff --git a/_examples/aks/outputs.tf b/_examples/aks/outputs.tf new file mode 100644 index 0000000000..18917bfee6 --- /dev/null +++ b/_examples/aks/outputs.tf @@ -0,0 +1,7 @@ +output "kubeconfig_path" { + value = abspath("${path.root}/kubeconfig") +} + +output "cluster_name" { + value = local.cluster_name +} diff --git a/_examples/aks/variables.tf b/_examples/aks/variables.tf new file mode 100644 index 0000000000..e8d47c0d96 --- /dev/null +++ b/_examples/aks/variables.tf @@ -0,0 +1,12 @@ +variable "location" { + type = string + default = "westus2" +} + +resource "random_id" "cluster_name" { + byte_length = 5 +} + +locals { + cluster_name = "tf-k8s-${random_id.cluster_name.hex}" +} diff --git a/_examples/certificate-signing-request/main.tf b/_examples/certificate-signing-request/main.tf index 828b41ebae..323e6b36f2 100644 --- a/_examples/certificate-signing-request/main.tf +++ b/_examples/certificate-signing-request/main.tf @@ -1,6 +1,6 @@ resource "tls_private_key" "example" { algorithm = "ECDSA" - rsa_bits = "4096" + rsa_bits = "4096" } resource "tls_cert_request" "example" { @@ -19,7 +19,7 @@ resource "kubernetes_certificate_signing_request" "example" { } spec { request = tls_cert_request.example.cert_request_pem - usages = ["client auth", "server auth"] + usages = ["client auth", "server auth"] } auto_approve = true } @@ -41,12 +41,12 @@ resource "kubernetes_pod" "main" { } spec { container { - name = "default" - image = "alpine:latest" + 
name = "default" + image = "alpine:latest" command = ["cat", "/etc/test/tls.crt"] volume_mount { mount_path = "/etc/test" - name = "secretvol" + name = "secretvol" } } volume { diff --git a/_examples/certificate-signing-request/variables.tf b/_examples/certificate-signing-request/variables.tf index 203e53a28e..e81b4aaff6 100644 --- a/_examples/certificate-signing-request/variables.tf +++ b/_examples/certificate-signing-request/variables.tf @@ -1,7 +1,7 @@ -variable example_user { - default = "admin" +variable "example_user" { + default = "admin" } -variable example_org { - default = "example cluster" +variable "example_org" { + default = "example cluster" } diff --git a/_examples/eks/README.md b/_examples/eks/README.md new file mode 100644 index 0000000000..4e091330b8 --- /dev/null +++ b/_examples/eks/README.md @@ -0,0 +1,68 @@ +# EKS (Amazon Elastic Kubernetes Service) + +This example shows how to use the Terraform Kubernetes Provider and Terraform Helm Provider to configure an EKS cluster. The example config builds the EKS cluster and applies the Kubernetes configurations in a single operation. This guide will also show you how to make changes to the underlying EKS cluster in such a way that Kubernetes/Helm resources are recreated after the underlying cluster is replaced. + +You will need the following environment variables to be set: + + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + +See [AWS Provider docs](https://www.terraform.io/docs/providers/aws/index.html#configuration-reference) for more details about these variables and alternatives, like `AWS_PROFILE`. + +Ensure that `KUBE_CONFIG_FILE` and `KUBE_CONFIG_FILES` environment variables are NOT set, as they will interfere with the cluster build. + +``` +unset KUBE_CONFIG_FILE +unset KUBE_CONFIG_FILES +``` + +To install the EKS cluster using default values, run terraform init and apply from the directory containing this README. 
+ +``` +terraform init +terraform apply +``` + +## Kubeconfig for manual CLI access + +This example generates a kubeconfig file in the current working directory. However, the token in this config expires in 15 minutes. The token can be refreshed by running `terraform apply` again. Export the KUBECONFIG to manually access the cluster: + +``` +terraform apply +export KUBECONFIG=$(terraform output -raw kubeconfig_path) +kubectl get pods -n test +``` + +## Optional variables + +The Kubernetes version can be specified at apply time: + +``` +terraform apply -var=kubernetes_version=1.18 +``` + +See https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html for currently available versions. + + +### Worker node count and instance type + +The number of worker nodes, and the instance type, can be specified at apply time: + +``` +terraform apply -var=workers_count=4 -var=workers_type=m4.xlarge +``` + +## Additional configuration of EKS + +To view all available configuration options for the EKS module used in this example, see [terraform-aws-modules/eks docs](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest). + +## Replacing the EKS cluster and re-creating the Kubernetes / Helm resources + +When the cluster is initially created, the Kubernetes and Helm providers will not be initialized until authentication details are created for the cluster. However, for future operations that may involve replacing the underlying cluster (for example, changing the network where the EKS cluster resides), the EKS cluster will have to be targeted without the Kubernetes/Helm providers, as shown below. This is done by removing the `module.kubernetes-config` from Terraform State prior to replacing cluster credentials, to avoid passing outdated credentials into the providers. + +This will create the new cluster and the Kubernetes resources in a single apply. 
+ +``` +terraform state rm module.kubernetes-config +terraform apply +``` diff --git a/_examples/eks/kubernetes-config/main.tf b/_examples/eks/kubernetes-config/main.tf new file mode 100644 index 0000000000..2c963afcbd --- /dev/null +++ b/_examples/eks/kubernetes-config/main.tf @@ -0,0 +1,107 @@ +provider "kubernetes" { + host = var.cluster_endpoint + cluster_ca_certificate = base64decode(var.cluster_ca_cert) + exec { + api_version = "client.authentication.k8s.io/v1alpha1" + args = ["eks", "get-token", "--cluster-name", var.cluster_name] + command = "aws" + } +} + +resource "kubernetes_config_map" "name" { + depends_on = [var.cluster_name] + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = { + mapRoles = join( + "\n", + formatlist(local.mapped_role_format, var.k8s_node_role_arn), + ) + } +} + +# This allows the kubeconfig file to be refreshed during every Terraform apply. +# Optional: this kubeconfig file is only used for manual CLI access to the cluster. +resource "null_resource" "generate-kubeconfig" { + provisioner "local-exec" { + command = "aws eks update-kubeconfig --name ${var.cluster_name} --kubeconfig ${path.root}/kubeconfig" + } + triggers = { + always_run = timestamp() + } +} + +resource "kubernetes_namespace" "test" { + depends_on = [var.cluster_name] + metadata { + name = "test" + } +} + +resource "kubernetes_deployment" "test" { + depends_on = [var.cluster_name] + metadata { + name = "test" + namespace= kubernetes_namespace.test.metadata.0.name + } + spec { + replicas = 2 + selector { + match_labels = { + app = "test" + } + } + template { + metadata { + labels = { + app = "test" + } + } + spec { + container { + image = "nginx:1.19.4" + name = "nginx" + + resources { + limits = { + memory = "512M" + cpu = "1" + } + requests = { + memory = "256M" + cpu = "50m" + } + } + } + } + } + } +} + +provider "helm" { + kubernetes { + host = var.cluster_endpoint + cluster_ca_certificate = base64decode(var.cluster_ca_cert) + exec { + 
api_version = "client.authentication.k8s.io/v1alpha1" + args = ["eks", "get-token", "--cluster-name", var.cluster_name] + command = "aws" + } + } +} + +resource helm_release nginx_ingress { + depends_on = [var.cluster_name] + name = "nginx-ingress-controller" + + repository = "https://charts.bitnami.com/bitnami" + chart = "nginx-ingress-controller" + + set { + name = "service.type" + value = "ClusterIP" + } +} diff --git a/_examples/eks/kubernetes-config/variables.tf b/_examples/eks/kubernetes-config/variables.tf new file mode 100644 index 0000000000..169c564f82 --- /dev/null +++ b/_examples/eks/kubernetes-config/variables.tf @@ -0,0 +1,26 @@ +variable "k8s_node_role_arn" { + type = list(string) +} + +variable "cluster_ca_cert" { + type = string +} + +variable "cluster_endpoint" { + type = string +} + +variable "cluster_name" { + type = string +} + +locals { + mapped_role_format = <