diff --git a/.github/workflows/publish-docker-images.yml b/.github/workflows/publish-docker-images.yml index ed42306b8f..a8b1221e04 100644 --- a/.github/workflows/publish-docker-images.yml +++ b/.github/workflows/publish-docker-images.yml @@ -1,5 +1,9 @@ name: Publish Docker images +env: + PROJECT_ID: ${{ secrets.GCP_PROJECT }} + REGION: ${{ secrets.GCP_REGION }} + on: push: branches: @@ -7,6 +11,7 @@ on: - 'staging' - 'test' - 'vcf' + - 'gcp' - 'tetrapack' - 'dev' - 'demo' @@ -57,9 +62,14 @@ jobs: name: Push API Docker image to Docker Hub runs-on: ubuntu-20.04 needs: wait_for_tests + + permissions: + contents: 'read' + id-token: 'write' + steps: - name: Check out the repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v1 @@ -72,6 +82,18 @@ jobs: id: ecr-login uses: aws-actions/amazon-ecr-login@v1 + - name: 'Authenticate to Google Cloud' + uses: google-github-actions/auth@v1 + with: + workload_identity_provider: 'projects/168301767246/locations/global/workloadIdentityPools/github-pool/providers/github-provider' + service_account: 'github-actions@landgriffon.iam.gserviceaccount.com' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + + - name: Authorize Docker push + run: gcloud auth configure-docker europe-west1-docker.pkg.dev + - name: Extract branch name shell: bash run: | @@ -85,23 +107,33 @@ jobs: ECR_REPOSITORY: api IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | - docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG api + docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/api/main:${{ github.sha }} \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/api/main:latest \ + api - - name: Push API Docker image to AWS ECR + - name: Push API Docker image to AWS and GCP env: ECR_REGISTRY: ${{ steps.ecr-login.outputs.registry }} ECR_REPOSITORY: api IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/api/main:${{ github.sha }} + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/api/main:latest push_client_to_registry: name: Push Client Docker image to Docker Hub runs-on: ubuntu-20.04 needs: wait_for_tests + + permissions: + contents: 'read' + id-token: 'write' + steps: - name: Check out the repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v1 @@ -114,6 +146,18 @@ jobs: id: ecr-login uses: aws-actions/amazon-ecr-login@v1 + - name: 'Authenticate to Google Cloud' + uses: google-github-actions/auth@v1 + with: + workload_identity_provider: 'projects/168301767246/locations/global/workloadIdentityPools/github-pool/providers/github-provider' + service_account: 'github-actions@landgriffon.iam.gserviceaccount.com' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + + - name: Authorize Docker push + run: gcloud auth configure-docker europe-west1-docker.pkg.dev + - name: Extract branch name shell: bash run: | @@ -134,25 +178,34 @@ jobs: --build-arg NEXT_PUBLIC_API_URL=${{ secrets[format('NEXT_PUBLIC_API_URL_{0}', steps.extract_branch.outputs.branch-upper )] }} \ --build-arg CYPRESS_USERNAME=${{ secrets.CYPRESS_USERNAME }} \ --build-arg CYPRESS_PASSWORD=${{ secrets.CYPRESS_PASSWORD }} \ - -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG \ + -t 
$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/client/main:${{ github.sha }} \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/client/main:latest \ client - - name: Push Client Docker image to AWS ECR + - name: Push Client Docker image to AWS and GCP env: ECR_REGISTRY: ${{ steps.ecr-login.outputs.registry }} ECR_REPOSITORY: client IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/client/main:${{ github.sha }} + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/client/main:latest push_data_import_to_registry: name: Push Data Import Docker image to Docker Hub runs-on: ubuntu-20.04 needs: wait_for_tests + + permissions: + contents: 'read' + id-token: 'write' + steps: - name: Check out the repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v1 @@ -165,6 +218,18 @@ jobs: id: ecr-login uses: aws-actions/amazon-ecr-login@v1 + - name: 'Authenticate to Google Cloud' + uses: google-github-actions/auth@v1 + with: + workload_identity_provider: 'projects/168301767246/locations/global/workloadIdentityPools/github-pool/providers/github-provider' + service_account: 'github-actions@landgriffon.iam.gserviceaccount.com' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + + - name: Authorize Docker push + run: gcloud auth configure-docker europe-west1-docker.pkg.dev + - name: Extract branch name shell: bash run: | @@ -178,23 +243,33 @@ jobs: ECR_REPOSITORY: data_import IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | - docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG data + docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/data-import/main:${{ github.sha }} \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/data-import/main:latest \ + data - - name: Push Data Import Docker image to AWS ECR + - name: Push Data Import Docker image to AWS and GCP env: ECR_REGISTRY: ${{ steps.ecr-login.outputs.registry }} ECR_REPOSITORY: data_import IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/data-import/main:${{ github.sha }} + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/data-import/main:latest push_tiler_to_registry: - name: Push Tiler Docker image to AWS ECR + name: Push Tiler Docker image to AWS and GCP runs-on: ubuntu-20.04 needs: wait_for_tests + + permissions: + contents: 'read' + id-token: 'write' + steps: - name: Check out the repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Configure AWS credentials uses: aws-actions/configure-aws-credentials@v1 @@ -207,6 +282,18 @@ jobs: id: ecr-login uses: aws-actions/amazon-ecr-login@v1 + - name: 'Authenticate to Google Cloud' + uses: google-github-actions/auth@v1 + with: + workload_identity_provider: 'projects/168301767246/locations/global/workloadIdentityPools/github-pool/providers/github-provider' + service_account: 'github-actions@landgriffon.iam.gserviceaccount.com' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + + - name: Authorize Docker push + run: gcloud auth configure-docker europe-west1-docker.pkg.dev + - name: Extract branch name shell: bash run: | @@ -220,11 +307,17 @@ jobs: 
ECR_REPOSITORY: tiler IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | - docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG tiler - - name: Push Tiler Docker image to AWS ECR + docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/tiler/main:${{ github.sha }} \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/tiler/main:latest \ + tiler + + - name: Push Tiler Docker image to AWS and GCP env: ECR_REGISTRY: ${{ steps.ecr-login.outputs.registry }} ECR_REPOSITORY: tiler IMAGE_TAG: ${{ steps.extract_branch.outputs.branch }} run: | docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/tiler/main:${{ github.sha }} + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/tiler/main:latest diff --git a/.github/workflows/publish-marketing-site.yml b/.github/workflows/publish-marketing-site.yml index 6128045f85..71276492d8 100644 --- a/.github/workflows/publish-marketing-site.yml +++ b/.github/workflows/publish-marketing-site.yml @@ -2,7 +2,6 @@ name: Publish marketing site image env: PROJECT_ID: ${{ secrets.GCP_PROJECT }} - SERVICE: 'marketing' REGION: ${{ secrets.GCP_REGION }} on: @@ -12,6 +11,7 @@ on: - 'staging' - 'test' - 'vcf' + - 'gcp' - 'tetrapack' - 'dev' - 'demo' @@ -63,15 +63,15 @@ jobs: --build-arg SENDGRID_API_KEY_SUBSCRIPTION=${{ secrets.MARKETING_SENDGRID_API_KEY_SUBSCRIPTION }} \ --build-arg SENDGRID_API_KEY_CONTACT=${{ secrets.MARKETING_SENDGRID_API_KEY_CONTACT }} \ --build-arg NEXT_PUBLIC_GOOGLE_ANALYTICS=${{ secrets.MARKETING_NEXT_PUBLIC_GOOGLE_ANALYTICS }} \ - -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.SERVICE }}/main:${{ github.sha }} \ - -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.SERVICE }}/main:latest ./marketing - docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.SERVICE }}/main:${{ github.sha }} - docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.SERVICE }}/main:latest + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/marketing/main:${{ github.sha }} \ + -t europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/marketing/main:latest ./marketing + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/marketing/main:${{ github.sha }} + docker push europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/marketing/main:latest - name: Deploy to Cloud Run run: |- - gcloud run deploy ${{ env.SERVICE }} \ + gcloud run deploy marketing \ --region ${{ env.REGION }} \ - --image europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.SERVICE }}/main:latest \ + --image europe-west1-docker.pkg.dev/${{ env.PROJECT_ID }}/marketing/main:latest \ --platform "managed" \ --quiet diff --git a/data/h3_data_importer/Makefile b/data/h3_data_importer/Makefile index cc903c12bf..8f61b53b9f 100644 --- a/data/h3_data_importer/Makefile +++ b/data/h3_data_importer/Makefile @@ -19,6 +19,9 @@ WORKDIR_HDI=data/contextual/hdi WORKDIR_GHG=data/forest_ghg WORKDIR_WOODPULP=data/woodpulp +export AWS_ACCESS_KEY_ID = $(DATA_S3_ACCESS_KEY) +export AWS_SECRET_ACCESS_KEY = $(DATA_S3_SECRET_KEY) + all: @aws s3 ls $(AWS_S3_BUCKET_URL) 2>&1 > /dev/null; \ if [ $$? 
-ne 0 ]; \ diff --git a/infrastructure/base/aws.tf b/infrastructure/base/aws.tf index b46edea8f6..0fd5e8d2be 100644 --- a/infrastructure/base/aws.tf +++ b/infrastructure/base/aws.tf @@ -145,3 +145,34 @@ resource "aws_iam_role_policy_attachment" "raw_s3_rw_access_attachment" { role = module.eks.node_role.name policy_arn = aws_iam_policy.raw_s3_rw_access.arn } + +resource "aws_iam_user" "raw_s3_reader" { + name = "ReadAccessToRawDataS3Bucket" +} + +resource "aws_iam_policy" "raw_s3_read_access" { + name = "ReadAccessToRawDataS3Bucket" + description = "Read access to the raw data S3 bucket" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + "Action" : [ + "s3:Get*", + "s3:List*", + ], + Effect = "Allow" + Resource = [ + module.s3_bucket.bucket_arn, + "${module.s3_bucket.bucket_arn}/*", + ] + }, + ] + }) +} + +resource "aws_iam_user_policy_attachment" "raw_s3_rw_access_attachment" { + user = aws_iam_user.raw_s3_reader.name + policy_arn = aws_iam_policy.raw_s3_read_access.arn +} diff --git a/infrastructure/base/gcp.tf b/infrastructure/base/gcp.tf index da2156fd2f..74276b1cbe 100644 --- a/infrastructure/base/gcp.tf +++ b/infrastructure/base/gcp.tf @@ -28,6 +28,38 @@ module "marketing_gcr" { service_account = module.workload_identity.service_account } +module "client_gcr" { + source = "./modules/gcp/gcr" + project_id = var.gcp_project_id + region = var.gcp_region + name = "client" + service_account = module.gke.node_service_account +} + +module "api_gcr" { + source = "./modules/gcp/gcr" + project_id = var.gcp_project_id + region = var.gcp_region + name = "api" + service_account = module.gke.node_service_account +} + +module "tiler_gcr" { + source = "./modules/gcp/gcr" + project_id = var.gcp_project_id + region = var.gcp_region + name = "tiler" + service_account = module.gke.node_service_account +} + +module "data_import_gcr" { + source = "./modules/gcp/gcr" + project_id = var.gcp_project_id + region = var.gcp_region + name = "data-import" + service_account = module.gke.node_service_account +} + module "load_balancer" { source = "./modules/gcp/load-balancer" region = var.gcp_region @@ -42,3 +74,14 @@ module "workload_identity" { project_id = var.gcp_project_id } + +module "gke" { + source = "./modules/gcp/gke" + cluster_name = var.project_name + node_pool_name = "default-pool" + zone = var.gcp_zone + region = var.gcp_region + project = var.gcp_project_id + network = module.network.network_name + subnetwork = module.network.subnetwork_name +} diff --git a/infrastructure/base/modules/gcp/gcr/outputs.tf b/infrastructure/base/modules/gcp/gcr/outputs.tf new file mode 100644 index 0000000000..6a1ce29503 --- /dev/null +++ b/infrastructure/base/modules/gcp/gcr/outputs.tf @@ -0,0 +1,3 @@ +output "artifact_registry_repository_url" { + value = "${google_artifact_registry_repository.repository.location}-docker.pkg.dev/${google_artifact_registry_repository.repository.project}/${google_artifact_registry_repository.repository.name}/main" +} diff --git a/infrastructure/base/modules/gcp/gke/main.tf b/infrastructure/base/modules/gcp/gke/main.tf new file mode 100644 index 0000000000..ddf03f1407 --- /dev/null +++ b/infrastructure/base/modules/gcp/gke/main.tf @@ -0,0 +1,67 @@ +resource "google_project_service" "container_api" { + service = "container.googleapis.com" + disable_on_destroy = false +} + +resource "google_container_cluster" "k8s_cluster" { + name = var.cluster_name + location = var.zone + network = var.network + subnetwork = var.subnetwork + initial_node_count = 1 + + networking_mode 
= "VPC_NATIVE" + + remove_default_node_pool = true + + release_channel { + channel = "REGULAR" + } + + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.48.0.0/14" + services_ipv4_cidr_block = "10.52.0.0/20" + } +} + +resource "google_service_account" "eks-node-service-account" { + account_id = "eks-node-service-account" + display_name = "EKS Nodes Service Account" +} + +resource "google_container_node_pool" "default-pool" { + name = var.node_pool_name + location = var.zone + cluster = google_container_cluster.k8s_cluster.name + + management { + auto_repair = true + auto_upgrade = true + } + + autoscaling { + max_node_count = 6 + min_node_count = 2 + } + + node_config { + preemptible = false + machine_type = "n1-standard-2" + + service_account = google_service_account.eks-node-service-account.email + + oauth_scopes = [ + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/sqlservice.admin", + ] + + labels = { + type = "default" + } + } +} diff --git a/infrastructure/base/modules/gcp/gke/outputs.tf b/infrastructure/base/modules/gcp/gke/outputs.tf new file mode 100644 index 0000000000..b9537e4e97 --- /dev/null +++ b/infrastructure/base/modules/gcp/gke/outputs.tf @@ -0,0 +1,15 @@ +output "cluster_endpoint" { + value = google_container_cluster.k8s_cluster.endpoint +} + +output "cluster_name" { + value = google_container_cluster.k8s_cluster.name +} + +output "cluster_ca_certificate" { + value = google_container_cluster.k8s_cluster.master_auth[0].cluster_ca_certificate +} + +output "node_service_account" { + value = google_service_account.eks-node-service-account +} diff --git a/infrastructure/base/modules/gcp/gke/variables.tf b/infrastructure/base/modules/gcp/gke/variables.tf new file mode 100644 index 0000000000..fa8a438e80 --- /dev/null +++ b/infrastructure/base/modules/gcp/gke/variables.tf @@ -0,0 +1,35 @@ +variable "region" { + description = "A valid GCP region to configure the underlying GCP SDK." + type = string +} + +variable "zone" { + description = "A valid GCP zone to configure the underlying GCP SDK." 
+ type = string +} + +variable "project" { + description = "Name of the GCP project" + type = string +} + +variable "cluster_name" { + description = "Name of the GKE cluster" + type = string +} + +variable "node_pool_name" { + description = "Name of the GKE node pool" + type = string +} + +variable "network" { + description = "Name of the VPC network" + type = string +} + + +variable "subnetwork" { + description = "Name of the VPC subnet" + type = string +} diff --git a/infrastructure/base/modules/gcp/network/outputs.tf b/infrastructure/base/modules/gcp/network/outputs.tf index cd0f7dbf61..44761e27d7 100644 --- a/infrastructure/base/modules/gcp/network/outputs.tf +++ b/infrastructure/base/modules/gcp/network/outputs.tf @@ -2,6 +2,10 @@ output "network_id" { value = google_compute_network.network.id } +output "network_name" { + value = google_compute_network.network.name +} + output "subnetwork_name" { value = google_compute_subnetwork.private.name } diff --git a/infrastructure/base/outputs.tf b/infrastructure/base/outputs.tf index fe05492cd1..063f4adc23 100644 --- a/infrastructure/base/outputs.tf +++ b/infrastructure/base/outputs.tf @@ -1,4 +1,9 @@ -# Output values which can be referenced in other repos +output "science_bucket_name" { + value = module.s3_bucket.science_bucket_name +} + +# AWS resources + output "account_id" { value = data.aws_caller_identity.current.account_id description = "ID of AWS account" @@ -26,6 +31,22 @@ output "private_subnet_ids" { value = module.vpc.private_subnet_ids } +output "aws_api_container_registry_url" { + value = module.api_container_registry.container_registry_url +} + +output "aws_tiler_container_registry_url" { + value = module.tiler_container_registry.container_registry_url +} + +output "aws_client_container_registry_url" { + value = module.client_container_registry.container_registry_url +} + +output "aws_data_import_container_registry_url" { + value = module.data_import_container_registry.container_registry_url +} + output "eks_cluster_host" { value = module.eks.cluster.endpoint description = "EKS cluster endpoint" @@ -36,20 +57,20 @@ output "eks_cluster_name" { description = "EKS cluster name" } -output "api_container_registry_url" { - value = module.api_container_registry.container_registry_url +output "raw_s3_reader" { + value = aws_iam_user.raw_s3_reader } -output "tiler_container_registry_url" { - value = module.tiler_container_registry.container_registry_url -} +# GCP resources -output "client_container_registry_url" { - value = module.client_container_registry.container_registry_url +output "gke_cluster_name" { + value = module.gke.cluster_name + description = "GKE cluster name" } -output "data_import_container_registry_url" { - value = module.data_import_container_registry.container_registry_url +output "gke_cluster_host" { + value = module.gke.cluster_endpoint + description = "GKE cluster endpoint" } output "gcp_workload_identity_provider" { @@ -60,7 +81,18 @@ output "gcp_service_account" { value = module.workload_identity.service_account.email } -output "science_bucket_name" { - value = module.s3_bucket.science_bucket_name +output "gcp_api_container_registry_url" { + value = module.api_gcr.artifact_registry_repository_url +} + +output "gcp_tiler_container_registry_url" { + value = module.tiler_gcr.artifact_registry_repository_url } +output "gcp_client_container_registry_url" { + value = module.client_gcr.artifact_registry_repository_url +} + +output "gcp_data_import_container_registry_url" { + value = 
module.data_import_gcr.artifact_registry_repository_url +}
diff --git a/infrastructure/base/variables.tf b/infrastructure/base/variables.tf
index 4d79ad2c5f..0f9dd2eedd 100644
--- a/infrastructure/base/variables.tf
+++ b/infrastructure/base/variables.tf
@@ -17,12 +17,17 @@ variable "domain" {
 variable "aws_region" {
   default     = "eu-west-3"
   type        = string
   description = "A valid AWS region to configure the underlying AWS SDK."
 }
 
 variable "gcp_region" {
   type        = string
-  description = "A valid AWS region to configure the underlying AWS SDK."
+  description = "A valid GCP region to configure the underlying GCP SDK."
+}
+
+variable "gcp_zone" {
+  type        = string
+  description = "A valid GCP zone to configure the underlying GCP SDK."
 }
 
 # define GCP project id
@@ -95,7 +100,7 @@ variable "default_node_group_min_size" {
 
 variable "default_node_group_max_size" {
   type    = number
-  default = 5
+  default = 6
 }
 
 variable "default_node_group_desired_size" {
diff --git a/infrastructure/kubernetes/.terraform.lock.hcl b/infrastructure/kubernetes/.terraform.lock.hcl
index 3e73a50d8c..b6e6f9af49 100644
--- a/infrastructure/kubernetes/.terraform.lock.hcl
+++ b/infrastructure/kubernetes/.terraform.lock.hcl
@@ -41,44 +41,62 @@ provider "registry.terraform.io/hashicorp/aws" {
 }
 
 provider "registry.terraform.io/hashicorp/github" {
-  version = "5.3.0"
+  version = "5.18.3"
   hashes = [
-    "h1:DVgl7G+BOUt0tBLW3LwCuJh7FmliwG4Y+KiELX4gN9U=",
-    "h1:pFsKVGjnvAUu9Scqkk3W0EdjEXtgkTz2qxKYMMA/Bww=",
-    "zh:1ad22c2d5b02f16ff6281e471be93d9e33f102020e7d88b2a86fd97b7f2c3728",
-    "zh:1d3968417f7cd87678d505afd12d8d753e692dd90c6ba0f52e7b150c69649eaf",
-    "zh:1fd5c610488671e7685ebd9b4afaf6fb86f5540f4a9df03a6ef4d449aec761c2",
-    "zh:2d494a769e8c5f9ac1cbb115117d4eba8dccf1f5524264ee010d67d92c43a639",
-    "zh:53642cbb984f9cb7a1f5a4a2bab274d11690411c8e0fa58e9a521588c15f6fc1",
-    "zh:5a62005caf307169feb457a89f53530221b04cd18ffa6862109ef1cf19972ec6",
-    "zh:64cbffbc2cea25892defc75e53c01bd0d3f51537b892869101fad5109c9ba80e",
-    "zh:653ed9b6f8b81ad6c0dd7428ac1cd6a90608db44d12d92c3e68ffb2da7abda4b",
-    "zh:6e197a456605392c501b662006a7c175c32cadabebe93671f7dc1d8e8c3b635b",
-    "zh:7bc207b8363f404f8b40f90329b04eb480ba09e7581408d7158edc4282daa86d",
-    "zh:80504cbf6f7214949ac3a43d652c8e8bf8059446174f30338f106bcc5f7b3070",
-    "zh:c8d2d65d9d89cb34bd63e6b2f48774469b9f8862e364e92c4f64db33930b60fb",
-    "zh:e2a81619918d92e16c461ea5e68ffec1b2860d1e80742018690a325c3b5da5db",
-    "zh:f0077fbaee5580650c9e09cc558620194478406998b1c4e875f91016ea543fc4",
+    "h1:Z/0vjFX80YzM3Oeq0mBbn4XYwb1POggjsu3RVQcbjNc=",
+    "zh:050b37d96628cb7451137755929ca8d21ea546bc46d11a715652584070e83ff2",
+    "zh:053051061f1b7f7673b0ceffac1f239ba28b0e5b375999206fd39976e85d9f2b",
+    "zh:0c300a977ca66d0347ed62bb116fd8fc9abb376a554d4c192d14f3ea71c83500",
+    "zh:1d5a1a5243eba78819d2f92ff2d504ebf9a9008a6670fb5f5660f44eb6a156d8",
+    "zh:a13ac15d251ebf4e7dc40acb0e40df066f443f4c7799186a29e2e44addc7d8e7",
+    "zh:a316d94b885953c036ebc9fba64a23da93974746bc3ac9d207462a6f02d44540",
+    "zh:a658a00373bff5979cc227052c693cbde8ca4c8f9fef1bc8094a3516f2e2a96d",
+    "zh:a7bfc6ad8465d5dc11b6f19d6805364de87fffe27622bb4f37da2319bb1c4956",
+    "zh:d7379a76861f1a6bfc36eca7a20f1f477711247563b105744d69d7bd1f365fad",
+    "zh:de1cd959fd4821248e8d21570601193408648474e74f49597f1d0c43185a4ab7",
+    "zh:e0b281240dd6f2aa405b2d6fe329bc15ab877161affe163fb150d1efca2fccdb",
+    "zh:e372c171358757a983d7aa878abfd05a84484fb4d22167e45c9c1267e78ed060",
+    
"zh:f6d3116526030b3f6905f530cd6c04b23d42890d973fa2abe10ce9c89cb1db80", + "zh:f99eec731e03cc6a28996c875bd435887cd7ea75ec07cc77b9e768bb12da2227", + ] +} + +provider "registry.terraform.io/hashicorp/google" { + version = "4.51.0" + constraints = "4.51.0" + hashes = [ + "h1:7JFdiV9bvV6R+AeWzvNbVeoega481sJY3PqtIbrwTsM=", + "zh:001bf7478e495d497ffd4054453c97ab4dd3e6a24d46496d51d4c8094e95b2b1", + "zh:19db72113552dd295854a99840e85678d421312708e8329a35787fff1baeed8b", + "zh:42c3e629ace225a2cb6cf87b8fabeaf1c56ac8eca6a77b9e3fc489f3cc0a9db5", + "zh:50b930755c4b1f8a01c430d8f688ea79de0b0198c87511baa3a783e360d7e624", + "zh:5acd67f0aafff5ad59e179543cccd1ffd48d69b98af0228506403b8d8193b340", + "zh:70128d57b4b4bf07df941172e6af15c4eda8396af5cc2b0128c906983c7b7fad", + "zh:7905fac0ba2becf0e97edfcd4224e57466b04f960f36a3ec654a0a3c2ffececb", + "zh:79b4cc760305cd77c1ff841f789184f808b8052e8f4faa5cb8d518e4c13beb22", + "zh:c7aebd7d7dd2b29de28e382500d36fae8b4d8a192cf05e41ea29c66f1251acfc", + "zh:d8b4494b13ef5af65d3afedf05bf7565918f1e31ad68ae0df81f5c3b12baf519", + "zh:e6e68ef6881bc3312db50c9fd761f226f34d7834b64f90d96616b7ca6b1daf34", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } provider "registry.terraform.io/hashicorp/helm" { - version = "2.7.0" + version = "2.7.1" constraints = "~> 2.7.0" hashes = [ - "h1:YXQgYy5YoqnMgKwlgRmkkUhlSKAX2RMOMujb86ua3jU=", - "h1:quavRe9VlwM06DoCgMckuj+5T48g+lfG75pip+iIbFQ=", - "zh:01f7428823c169e20c051e363e580093b874d32e64fe8feab665cc9d1d599691", - "zh:089511b2b363d9bd4d47cb0975e4612c0ae02bdac6185e8872ded7e229d27192", - "zh:0b4ab015e114a3f73b320b716d9aa081b378736389e85aa13c6aba430c219029", - "zh:323d0ef19629a78f767a0895095ad7a79c7e2cb00d2596b641aac44f05f413e7", - "zh:3a61195d2600ffdf715cc5591cda3d4438825d155c7a422fe0eca0d850ac478c", - "zh:8321d46df29f2a1c434f28142f60fc4bd24523bff74072e80691dd4b75e33bc3", - "zh:91d8e17f20bcdfad35fafd44eadb6e61437144caa754649b87a8229af77b810e", - "zh:b76c569a7333a38d88fe63f6d22aaa80239d165b86c94b4a8f0027aaf06c119b", - "zh:d21ba6632b306c53318921f1007e3cdbef3e90928ccfdb7ca9a0946cb6970b10", - "zh:e2268e24d3978b62cd24284df0ec1d1e1153d37054219e8792495b2aef241d5e", - "zh:ecfede6ace6afc0e1b0fe8c525efcc207fbb6b07e8805bc38b0787c4702bd566", + "h1:11oWNeohjD8Fy9S7WQSKY3GmDZi7gVdMRp8/Wqxn410=", + "zh:13e2467092deeff01c4cfa2b54ba4510aa7a9b06c58f22c4215b0f4333858364", + "zh:4549843db4fdf5d8150e8c0734e67b54b5c3bcfc914e3221e6952f428fb984d2", + "zh:55b5f83ed52f93dd00a73c33c948326052efd700350c19e63bb1679b12bfcda6", + "zh:749397e41393289eb0ef6efd0a75911d29b8aa7f48e5d6813b4b350dad91acbd", + "zh:7a4a2c95b055f6c8e70d1fc7a4cc4fd6e4f04845be36e40d42d31dfc13db37b8", + "zh:8143e5b8218857052505c805b570889b862c618ce6cbfbddb98938ff7a5901d3", + "zh:856d94b3b34d6204d66c6de4feab4737c74dba037ad64e4c613e8eec61d17f1a", + "zh:b9b037f1edda209022df1c7fc906786970524873e27b061f3355cb9bbed2cf08", + "zh:c433b27f52a0600490af07f8b217ab0b1048ba347d68e6fe478aba18634e78d9", + "zh:da133748368c6e27b433cd7faeb7b800536c8651e7af0415452901dfc7577dbf", + "zh:eecc63c2dec8aafa2ffd7426800c3e1a5e31e848be01ea9511ad0184dce15945", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", ] } @@ -105,22 +123,21 @@ provider "registry.terraform.io/hashicorp/kubernetes" { } provider "registry.terraform.io/hashicorp/null" { - version = "3.1.1" + version = "3.2.1" hashes = [ - "h1:71sNUDvmiJcijsvfXpiLCz0lXIBSsEJjMxljt7hxMhw=", - "h1:Pctug/s/2Hg5FJqjYcTM0kPyx3AoYK1MpRWO0T9V2ns=", - "zh:063466f41f1d9fd0dd93722840c1314f046d8760b1812fa67c34de0afcba5597", - 
"zh:08c058e367de6debdad35fc24d97131c7cf75103baec8279aba3506a08b53faf", - "zh:73ce6dff935150d6ddc6ac4a10071e02647d10175c173cfe5dca81f3d13d8afe", + "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=", + "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840", + "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb", + "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5", + "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:8fdd792a626413502e68c195f2097352bdc6a0df694f7df350ed784741eb587e", - "zh:976bbaf268cb497400fd5b3c774d218f3933271864345f18deebe4dcbfcd6afa", - "zh:b21b78ca581f98f4cdb7a366b03ae9db23a73dfa7df12c533d7c19b68e9e72e5", - "zh:b7fc0c1615dbdb1d6fd4abb9c7dc7da286631f7ca2299fb9cd4664258ccfbff4", - "zh:d1efc942b2c44345e0c29bc976594cb7278c38cfb8897b344669eafbc3cddf46", - "zh:e356c245b3cd9d4789bab010893566acace682d7db877e52d40fc4ca34a50924", - "zh:ea98802ba92fcfa8cf12cbce2e9e7ebe999afbf8ed47fa45fc847a098d89468b", - "zh:eff8872458806499889f6927b5d954560f3d74bf20b6043409edf94d26cd906f", + "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238", + "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc", + "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970", + "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2", + "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5", + "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f", + "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694", ] } @@ -164,24 +181,23 @@ provider "registry.terraform.io/hashicorp/template" { } provider "registry.terraform.io/integrations/github" { - version = "5.3.0" - constraints = "~> 5.3.0" + version = "5.17.0" + constraints = "5.17.0" hashes = [ - "h1:DVgl7G+BOUt0tBLW3LwCuJh7FmliwG4Y+KiELX4gN9U=", - "h1:pFsKVGjnvAUu9Scqkk3W0EdjEXtgkTz2qxKYMMA/Bww=", - "zh:1ad22c2d5b02f16ff6281e471be93d9e33f102020e7d88b2a86fd97b7f2c3728", - "zh:1d3968417f7cd87678d505afd12d8d753e692dd90c6ba0f52e7b150c69649eaf", - "zh:1fd5c610488671e7685ebd9b4afaf6fb86f5540f4a9df03a6ef4d449aec761c2", - "zh:2d494a769e8c5f9ac1cbb115117d4eba8dccf1f5524264ee010d67d92c43a639", - "zh:53642cbb984f9cb7a1f5a4a2bab274d11690411c8e0fa58e9a521588c15f6fc1", - "zh:5a62005caf307169feb457a89f53530221b04cd18ffa6862109ef1cf19972ec6", - "zh:64cbffbc2cea25892defc75e53c01bd0d3f51537b892869101fad5109c9ba80e", - "zh:653ed9b6f8b81ad6c0dd7428ac1cd6a90608db44d12d92c3e68ffb2da7abda4b", - "zh:6e197a456605392c501b662006a7c175c32cadabebe93671f7dc1d8e8c3b635b", - "zh:7bc207b8363f404f8b40f90329b04eb480ba09e7581408d7158edc4282daa86d", - "zh:80504cbf6f7214949ac3a43d652c8e8bf8059446174f30338f106bcc5f7b3070", - "zh:c8d2d65d9d89cb34bd63e6b2f48774469b9f8862e364e92c4f64db33930b60fb", - "zh:e2a81619918d92e16c461ea5e68ffec1b2860d1e80742018690a325c3b5da5db", - "zh:f0077fbaee5580650c9e09cc558620194478406998b1c4e875f91016ea543fc4", + "h1:CWw2DL8qmBp/LkqZAC3HiNFskw4bPyZYXgVgwUK7Lew=", + "zh:0caa38dab96d68621a1ae7087ca3b86f42aa0e6fc250f906299f1a34c9dd1e54", + "zh:1119f8dacb2da0de0735e9ae586702e5f9758b963e548b5fa09a9f216d00bbc4", + "zh:16bed2a93216aa573d1b2ff7cd371c9df3d454284204a4695d5b30f7325f49b3", + "zh:537d29a3a18d6b3a588c8878793d99d937d1e29466c02ce08536943a26931387", + "zh:664d83424cc8d12055806134e5d110b82f469fb5824d3c3ffe1ea399637aed5d", + "zh:725d6633fb92069bce53cb8b0f3b4d4a1fb4c0a336b138f62096dc2f7d4c2155", + 
"zh:8003646cc7caaa48841e802570626fd5cc8ad1bb2a341351ccf996eae62e88cb", + "zh:945f1f70842d04192626ae8e78372e48d16808d5104563bce32915c95236d820", + "zh:a0d8a25f8d84e78c3cfd5691f71c48f805ad38dab0a6a33f4d8e5cfc981b9cd9", + "zh:a3ba46c09233c4b77b63807654083385cc865e650bbb6274d8768bb18ff01508", + "zh:a80b7190ed733b9de6f3cfb55e82234457f51bb36bdcc11277a7623a47155cb4", + "zh:ba3f6f61deafaae1de92c17e924c7ef157ca0db2d5e14ae637a3a63bb1aeac9f", + "zh:c7b9790c722e597dc4e3d59bc9b510f364b3a522b70cd58727da09cd6adcf527", + "zh:f293b9ee146b2f22d79d4e53f0a1eb6bfdf8dca1d92bc39370a9df52046fdaa3", ] } diff --git a/infrastructure/kubernetes/main.tf b/infrastructure/kubernetes/main.tf index b01f231b39..aea72e7905 100644 --- a/infrastructure/kubernetes/main.tf +++ b/infrastructure/kubernetes/main.tf @@ -9,7 +9,7 @@ terraform { data "terraform_remote_state" "core" { backend = "s3" - config = { + config = { bucket = var.tf_state_bucket region = var.aws_region key = "core.tfstate" @@ -20,12 +20,29 @@ data "aws_eks_cluster" "cluster" { name = data.terraform_remote_state.core.outputs.eks_cluster_name } +data "google_container_cluster" "cluster" { + name = data.terraform_remote_state.core.outputs.gke_cluster_name + location = var.gcp_zone + project = var.gcp_project_id +} + + +resource "aws_iam_access_key" "access_key" { + user = data.terraform_remote_state.core.outputs.raw_s3_reader.name +} + module "k8s_infrastructure" { source = "./modules/k8s_infrastructure" cluster_name = data.terraform_remote_state.core.outputs.eks_cluster_name aws_region = var.aws_region vpc_id = data.aws_eks_cluster.cluster.vpc_config[0].vpc_id deploy_metrics_server = false + + providers = { + helm = helm.aws_helm + kubectl = kubectl.aws_kubectl + kubernetes = kubernetes.aws_kubernetes + } } resource "github_actions_secret" "mapbox_api_token_secret" { @@ -34,20 +51,20 @@ resource "github_actions_secret" "mapbox_api_token_secret" { plaintext_value = var.mapbox_api_token } -module "environment" { - for_each = merge(var.environments, { +module "aws_environment" { + for_each = merge(var.aws_environments, { staging = merge({ load_fresh_data = false data_import_arguments = ["seed-data"] image_tag = "staging" - }, lookup(var.environments, "staging", {})), + }, lookup(var.aws_environments, "staging", {})), production = merge({ load_fresh_data = false data_import_arguments = ["seed-data"] image_tag = "main" - }, lookup(var.environments, "production", {})), + }, lookup(var.aws_environments, "production", {})), }) - source = "./modules/env" + source = "./modules/aws/env" cluster_name = data.terraform_remote_state.core.outputs.eks_cluster_name project_name = var.project_name @@ -59,15 +76,56 @@ module "environment" { load_fresh_data = lookup(each.value, "load_fresh_data", false) data_import_arguments = lookup(each.value, "data_import_arguments", ["seed-data"]) image_tag = lookup(each.value, "image_tag", each.key) + repo_branch = lookup(each.value, "image_tag", each.key) + private_subnet_ids = data.terraform_remote_state.core.outputs.private_subnet_ids + repo_name = var.repo_name + domain = var.domain + api_container_registry_url = data.terraform_remote_state.core.outputs.aws_api_container_registry_url + client_container_registry_url = data.terraform_remote_state.core.outputs.aws_client_container_registry_url + tiler_container_registry_url = data.terraform_remote_state.core.outputs.aws_tiler_container_registry_url + data_import_container_registry_url = data.terraform_remote_state.core.outputs.aws_data_import_container_registry_url + api_env_vars = 
lookup(each.value, "api_env_vars", []) + api_secrets = lookup(each.value, "api_secrets", []) + science_bucket_name = data.terraform_remote_state.core.outputs.science_bucket_name + + providers = { + kubernetes = kubernetes.aws_kubernetes + helm = helm.aws_helm + } +} + +module "gcp_environment" { + for_each = var.gcp_environments + source = "./modules/gcp/env" + + cluster_name = data.google_container_cluster.cluster.name + project_name = var.project_name + environment = each.key + tf_state_bucket = var.tf_state_bucket + allowed_account_id = var.allowed_account_id + gmaps_api_key = var.gmaps_api_key + load_fresh_data = lookup(each.value, "load_fresh_data", false) + data_import_arguments = lookup(each.value, "data_import_arguments", ["seed-data"]) + image_tag = "latest" + repo_branch = lookup(each.value, "image_tag", each.key) private_subnet_ids = data.terraform_remote_state.core.outputs.private_subnet_ids repo_name = var.repo_name domain = var.domain - api_container_registry_url = data.terraform_remote_state.core.outputs.api_container_registry_url - client_container_registry_url = data.terraform_remote_state.core.outputs.client_container_registry_url - tiler_container_registry_url = data.terraform_remote_state.core.outputs.tiler_container_registry_url - data_import_container_registry_url = data.terraform_remote_state.core.outputs.data_import_container_registry_url + api_container_registry_url = data.terraform_remote_state.core.outputs.gcp_api_container_registry_url + client_container_registry_url = data.terraform_remote_state.core.outputs.gcp_client_container_registry_url + tiler_container_registry_url = data.terraform_remote_state.core.outputs.gcp_tiler_container_registry_url + data_import_container_registry_url = data.terraform_remote_state.core.outputs.gcp_data_import_container_registry_url api_env_vars = lookup(each.value, "api_env_vars", []) api_secrets = lookup(each.value, "api_secrets", []) science_bucket_name = data.terraform_remote_state.core.outputs.science_bucket_name + gcp_project = var.gcp_project_id + gcp_region = var.gcp_region + gcp_zone = var.gcp_zone + aws_access_key_id = aws_iam_access_key.access_key.id + aws_secret_access_key = aws_iam_access_key.access_key.secret + providers = { + kubernetes = kubernetes.gcp_kubernetes + helm = helm.gcp_helm + } } diff --git a/infrastructure/kubernetes/modules/api/main.tf b/infrastructure/kubernetes/modules/api/main.tf index c02b7e61aa..3a54da1903 100644 --- a/infrastructure/kubernetes/modules/api/main.tf +++ b/infrastructure/kubernetes/modules/api/main.tf @@ -1,7 +1,3 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - resource "kubernetes_service" "api_service" { metadata { name = kubernetes_deployment.api_deployment.metadata[0].name diff --git a/infrastructure/kubernetes/modules/api/variable.tf b/infrastructure/kubernetes/modules/api/variable.tf index 5f347b09fe..77470d6336 100644 --- a/infrastructure/kubernetes/modules/api/variable.tf +++ b/infrastructure/kubernetes/modules/api/variable.tf @@ -1,8 +1,3 @@ -variable "cluster_name" { - type = string - description = "The k8s cluster name" -} - variable "image" { type = string description = "The dockerhub image reference to deploy" diff --git a/infrastructure/kubernetes/modules/database/main.tf b/infrastructure/kubernetes/modules/aws/database/main.tf similarity index 100% rename from infrastructure/kubernetes/modules/database/main.tf rename to infrastructure/kubernetes/modules/aws/database/main.tf diff --git a/infrastructure/kubernetes/modules/database/values.yaml 
b/infrastructure/kubernetes/modules/aws/database/values.yaml similarity index 100% rename from infrastructure/kubernetes/modules/database/values.yaml rename to infrastructure/kubernetes/modules/aws/database/values.yaml diff --git a/infrastructure/kubernetes/modules/aws/database/variable.tf b/infrastructure/kubernetes/modules/aws/database/variable.tf new file mode 100644 index 0000000000..26939be627 --- /dev/null +++ b/infrastructure/kubernetes/modules/aws/database/variable.tf @@ -0,0 +1,16 @@ +variable "namespace" { + type = string + description = "The k8s namespace to use" +} + +variable "username" { + type = string +} + +variable "password" { + type = string +} + +variable "database" { + type = string +} diff --git a/infrastructure/kubernetes/modules/database/versions.tf b/infrastructure/kubernetes/modules/aws/database/versions.tf similarity index 100% rename from infrastructure/kubernetes/modules/database/versions.tf rename to infrastructure/kubernetes/modules/aws/database/versions.tf diff --git a/infrastructure/kubernetes/modules/env/main.tf b/infrastructure/kubernetes/modules/aws/env/main.tf similarity index 85% rename from infrastructure/kubernetes/modules/env/main.tf rename to infrastructure/kubernetes/modules/aws/env/main.tf index b07bd546db..56957e138d 100644 --- a/infrastructure/kubernetes/modules/env/main.tf +++ b/infrastructure/kubernetes/modules/aws/env/main.tf @@ -1,12 +1,10 @@ module "k8s_namespace" { - source = "../k8s_namespace" - cluster_name = var.cluster_name + source = "../../k8s_namespace" namespace = var.environment } module "k8s_database" { source = "../database" - cluster_name = var.cluster_name namespace = var.environment username = module.k8s_secrets.postgres_username password = module.k8s_secrets.postgres_password @@ -19,7 +17,6 @@ module "k8s_database" { module "k8s_redis" { source = "../redis" - cluster_name = var.cluster_name namespace = var.environment depends_on = [ @@ -28,8 +25,7 @@ module "k8s_redis" { } module "k8s_api" { - source = "../api" - cluster_name = var.cluster_name + source = "../../api" deployment_name = "api" image = "${var.api_container_registry_url}:${var.image_tag}" namespace = var.environment @@ -109,8 +105,7 @@ module "k8s_api" { module "k8s_tiler" { - source = "../tiler" - cluster_name = var.cluster_name + source = "../../tiler" deployment_name = "tiler" image = "${var.tiler_container_registry_url}:${var.image_tag}" namespace = var.environment @@ -159,8 +154,7 @@ module "k8s_tiler" { module "k8s_client" { - source = "../client" - cluster_name = var.cluster_name + source = "../../client" deployment_name = "client" image = "${var.client_container_registry_url}:${var.image_tag}" namespace = var.environment @@ -173,14 +167,40 @@ module "k8s_client" { } module "k8s_data_import" { - source = "../data_import" - cluster_name = var.cluster_name + source = "../../data_import" job_name = "data-import" image = "${var.data_import_container_registry_url}:${var.image_tag}" namespace = var.environment load_data = var.load_fresh_data arguments = var.data_import_arguments + env_vars = [ + { + name = "API_POSTGRES_PORT" + value = "5432" + } + ] + + secrets = [ + { + name = "API_POSTGRES_HOST" + secret_name = "db" + secret_key = "DB_HOST" + }, { + name = "API_POSTGRES_USERNAME" + secret_name = "db" + secret_key = "DB_USERNAME" + }, { + name = "API_POSTGRES_PASSWORD" + secret_name = "db" + secret_key = "DB_PASSWORD" + }, { + name = "API_POSTGRES_DATABASE" + secret_name = "db" + secret_key = "DB_DATABASE" + } + ] + depends_on = [ module.k8s_namespace, 
module.k8s_database, @@ -190,7 +210,6 @@ module "k8s_data_import" { module "k8s_secrets" { source = "../secrets" - cluster_name = var.cluster_name tf_state_bucket = var.tf_state_bucket aws_region = var.aws_region allowed_account_id = var.allowed_account_id @@ -204,7 +223,6 @@ module "k8s_secrets" { module "k8s_ingress" { source = "../ingress" - cluster_name = var.cluster_name allowed_account_id = var.allowed_account_id aws_region = var.aws_region namespace = var.environment @@ -233,14 +251,14 @@ module "data-import-group" { } module "github_actions_frontend_secrets" { - source = "../github_secrets" + source = "../../github_secrets" repo_name = var.repo_name - branch = var.image_tag + branch = var.repo_branch domain = var.domain } #module "data_import" { -# source = "../modules/fargate" +# source = "../../modules/fargate" # namespace = var.environment # postgresql_port = module.k8s_database.postgresql_service_port #} diff --git a/infrastructure/kubernetes/modules/env/variables.tf b/infrastructure/kubernetes/modules/aws/env/variables.tf similarity index 96% rename from infrastructure/kubernetes/modules/env/variables.tf rename to infrastructure/kubernetes/modules/aws/env/variables.tf index ddb12af7bd..d70b423070 100644 --- a/infrastructure/kubernetes/modules/env/variables.tf +++ b/infrastructure/kubernetes/modules/aws/env/variables.tf @@ -20,6 +20,11 @@ variable "repo_name" { description = "Name of the github repo where the project is hosted" } +variable "repo_branch" { + type = string + description = "The github branch to use" +} + variable "cluster_name" { type = string description = "The k8s cluster name" diff --git a/infrastructure/kubernetes/modules/redis/versions.tf b/infrastructure/kubernetes/modules/aws/env/versions.tf similarity index 100% rename from infrastructure/kubernetes/modules/redis/versions.tf rename to infrastructure/kubernetes/modules/aws/env/versions.tf diff --git a/infrastructure/kubernetes/modules/fargate/main.tf b/infrastructure/kubernetes/modules/aws/fargate/main.tf similarity index 100% rename from infrastructure/kubernetes/modules/fargate/main.tf rename to infrastructure/kubernetes/modules/aws/fargate/main.tf diff --git a/infrastructure/kubernetes/modules/fargate/variable.tf b/infrastructure/kubernetes/modules/aws/fargate/variable.tf similarity index 100% rename from infrastructure/kubernetes/modules/fargate/variable.tf rename to infrastructure/kubernetes/modules/aws/fargate/variable.tf diff --git a/infrastructure/kubernetes/modules/ingress/main.tf b/infrastructure/kubernetes/modules/aws/ingress/main.tf similarity index 98% rename from infrastructure/kubernetes/modules/ingress/main.tf rename to infrastructure/kubernetes/modules/aws/ingress/main.tf index 7ecac2d84c..6f4e77d5f4 100644 --- a/infrastructure/kubernetes/modules/ingress/main.tf +++ b/infrastructure/kubernetes/modules/aws/ingress/main.tf @@ -1,7 +1,3 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - locals { api_domain = "api.${var.namespace != "production" ? ("${var.namespace}.") : ""}${var.domain}" client_domain = "client.${var.namespace != "production" ? 
("${var.namespace}.") : ""}${var.domain}" diff --git a/infrastructure/kubernetes/modules/ingress/output.tf b/infrastructure/kubernetes/modules/aws/ingress/output.tf similarity index 100% rename from infrastructure/kubernetes/modules/ingress/output.tf rename to infrastructure/kubernetes/modules/aws/ingress/output.tf diff --git a/infrastructure/kubernetes/modules/ingress/variable.tf b/infrastructure/kubernetes/modules/aws/ingress/variable.tf similarity index 82% rename from infrastructure/kubernetes/modules/ingress/variable.tf rename to infrastructure/kubernetes/modules/aws/ingress/variable.tf index d0a4ae6495..f372075546 100644 --- a/infrastructure/kubernetes/modules/ingress/variable.tf +++ b/infrastructure/kubernetes/modules/aws/ingress/variable.tf @@ -3,11 +3,6 @@ variable "namespace" { description = "The k8s namespace to use" } -variable "cluster_name" { - type = string - description = "The k8s cluster name" -} - variable "allowed_account_id" { type = string description = "Allowed AWS Account ID" diff --git a/infrastructure/kubernetes/modules/ingress/versions.tf b/infrastructure/kubernetes/modules/aws/ingress/versions.tf similarity index 100% rename from infrastructure/kubernetes/modules/ingress/versions.tf rename to infrastructure/kubernetes/modules/aws/ingress/versions.tf diff --git a/infrastructure/kubernetes/modules/node_group/main.tf b/infrastructure/kubernetes/modules/aws/node_group/main.tf similarity index 100% rename from infrastructure/kubernetes/modules/node_group/main.tf rename to infrastructure/kubernetes/modules/aws/node_group/main.tf diff --git a/infrastructure/kubernetes/modules/node_group/outputs.tf b/infrastructure/kubernetes/modules/aws/node_group/outputs.tf similarity index 100% rename from infrastructure/kubernetes/modules/node_group/outputs.tf rename to infrastructure/kubernetes/modules/aws/node_group/outputs.tf diff --git a/infrastructure/kubernetes/modules/node_group/variable.tf b/infrastructure/kubernetes/modules/aws/node_group/variable.tf similarity index 100% rename from infrastructure/kubernetes/modules/node_group/variable.tf rename to infrastructure/kubernetes/modules/aws/node_group/variable.tf diff --git a/infrastructure/kubernetes/modules/redis/main.tf b/infrastructure/kubernetes/modules/aws/redis/main.tf similarity index 100% rename from infrastructure/kubernetes/modules/redis/main.tf rename to infrastructure/kubernetes/modules/aws/redis/main.tf diff --git a/infrastructure/kubernetes/modules/redis/outputs.tf b/infrastructure/kubernetes/modules/aws/redis/outputs.tf similarity index 100% rename from infrastructure/kubernetes/modules/redis/outputs.tf rename to infrastructure/kubernetes/modules/aws/redis/outputs.tf diff --git a/infrastructure/kubernetes/modules/redis/values.yaml b/infrastructure/kubernetes/modules/aws/redis/values.yaml similarity index 100% rename from infrastructure/kubernetes/modules/redis/values.yaml rename to infrastructure/kubernetes/modules/aws/redis/values.yaml diff --git a/infrastructure/kubernetes/modules/aws/redis/variable.tf b/infrastructure/kubernetes/modules/aws/redis/variable.tf new file mode 100644 index 0000000000..72634803ef --- /dev/null +++ b/infrastructure/kubernetes/modules/aws/redis/variable.tf @@ -0,0 +1,4 @@ +variable "namespace" { + type = string + description = "The k8s namespace to use" +} diff --git a/infrastructure/kubernetes/modules/aws/redis/versions.tf b/infrastructure/kubernetes/modules/aws/redis/versions.tf new file mode 100644 index 0000000000..9bd04dabf0 --- /dev/null +++ 
b/infrastructure/kubernetes/modules/aws/redis/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.14.0" + } + + helm = { + source = "hashicorp/helm" + version = "~> 2.7.0" + } + } + required_version = "~> 1.3.2" +} diff --git a/infrastructure/kubernetes/modules/secrets/main.tf b/infrastructure/kubernetes/modules/aws/secrets/main.tf similarity index 88% rename from infrastructure/kubernetes/modules/secrets/main.tf rename to infrastructure/kubernetes/modules/aws/secrets/main.tf index 1ce1c521b7..87f89082a4 100644 --- a/infrastructure/kubernetes/modules/secrets/main.tf +++ b/infrastructure/kubernetes/modules/aws/secrets/main.tf @@ -1,7 +1,3 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - locals { postgres_secret_json = { username = "landgriffon-${var.namespace}" @@ -19,20 +15,6 @@ locals { resource "random_password" "jwt_secret_generator" { length = 64 special = true - # lifecycle { - # ignore_changes = [ - # length, - # lower, - # min_lower, - # min_numeric, - # min_special, - # min_upper, - # number, - # special, - # upper, - # - # ] - # } } resource "aws_secretsmanager_secret" "api_secret" { diff --git a/infrastructure/kubernetes/modules/secrets/outputs.tf b/infrastructure/kubernetes/modules/aws/secrets/outputs.tf similarity index 100% rename from infrastructure/kubernetes/modules/secrets/outputs.tf rename to infrastructure/kubernetes/modules/aws/secrets/outputs.tf diff --git a/infrastructure/kubernetes/modules/secrets/variable.tf b/infrastructure/kubernetes/modules/aws/secrets/variable.tf similarity index 86% rename from infrastructure/kubernetes/modules/secrets/variable.tf rename to infrastructure/kubernetes/modules/aws/secrets/variable.tf index 294c38f5bd..bbe218d70a 100644 --- a/infrastructure/kubernetes/modules/secrets/variable.tf +++ b/infrastructure/kubernetes/modules/aws/secrets/variable.tf @@ -8,11 +8,6 @@ variable "namespace" { description = "The k8s namespace to use" } -variable "cluster_name" { - type = string - description = "The k8s cluster name" -} - variable "aws_region" { type = string description = "The name of the AWS region where the cluster lives" diff --git a/infrastructure/kubernetes/modules/secrets/versions.tf b/infrastructure/kubernetes/modules/aws/secrets/versions.tf similarity index 100% rename from infrastructure/kubernetes/modules/secrets/versions.tf rename to infrastructure/kubernetes/modules/aws/secrets/versions.tf diff --git a/infrastructure/kubernetes/modules/client/main.tf b/infrastructure/kubernetes/modules/client/main.tf index f7a1d8e300..f24858f550 100644 --- a/infrastructure/kubernetes/modules/client/main.tf +++ b/infrastructure/kubernetes/modules/client/main.tf @@ -1,11 +1,10 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - resource "kubernetes_service" "client_service" { metadata { - name = kubernetes_deployment.client_deployment.metadata[0].name - namespace = var.namespace + name = kubernetes_deployment.client_deployment.metadata[0].name + namespace = var.namespace + annotations = { + "alb.ingress.kubernetes.io/healthcheck-path" = "/auth/signin" + } } spec { selector = { diff --git a/infrastructure/kubernetes/modules/client/variable.tf b/infrastructure/kubernetes/modules/client/variable.tf index f5239996d3..7eb6e1ff88 100644 --- a/infrastructure/kubernetes/modules/client/variable.tf +++ b/infrastructure/kubernetes/modules/client/variable.tf @@ -1,8 +1,3 @@ -variable "cluster_name" { - type = string - description = 
"The k8s cluster name" -} - variable "image" { type = string description = "The dockerhub image reference to deploy" diff --git a/infrastructure/kubernetes/modules/data_import/main.tf b/infrastructure/kubernetes/modules/data_import/main.tf index 516cad00c5..76e0803f25 100644 --- a/infrastructure/kubernetes/modules/data_import/main.tf +++ b/infrastructure/kubernetes/modules/data_import/main.tf @@ -1,7 +1,3 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - resource "kubernetes_job" "data_import" { count = var.load_data ? 1 : 0 @@ -51,55 +47,29 @@ resource "kubernetes_job" "data_import" { args = var.arguments - env { - name = "API_POSTGRES_HOST" - value_from { - secret_key_ref { - name = "db" - key = "DB_HOST" - } - } - } - - env { - name = "API_POSTGRES_PORT" - value = "5432" - } - - env { - name = "API_POSTGRES_USERNAME" - value_from { - secret_key_ref { - name = "db" - key = "DB_USERNAME" - } - } - } - - env { - name = "API_POSTGRES_PASSWORD" - value_from { - secret_key_ref { - name = "db" - key = "DB_PASSWORD" - } - } - } + dynamic "env" { + for_each = concat(var.env_vars, var.secrets) + content { + name = env.value["name"] + dynamic "value_from" { + for_each = lookup(env.value, "secret_name", null) != null ? [1] : [] + content { + secret_key_ref { + + name = env.value["secret_name"] + key = env.value["secret_key"] + } + } - env { - name = "API_POSTGRES_DATABASE" - value_from { - secret_key_ref { - name = "db" - key = "DB_DATABASE" } + value = lookup(env.value, "value", null) != null ? env.value["value"] : null } } resources { requests = { cpu = "15" - memory = "120Gi" + memory = "110Gi" } } } diff --git a/infrastructure/kubernetes/modules/data_import/variable.tf b/infrastructure/kubernetes/modules/data_import/variable.tf index 8c04852c85..490433bd7b 100644 --- a/infrastructure/kubernetes/modules/data_import/variable.tf +++ b/infrastructure/kubernetes/modules/data_import/variable.tf @@ -1,8 +1,3 @@ -variable "cluster_name" { - type = string - description = "The k8s cluster name" -} - variable "image" { type = string description = "The dockerhub image reference to deploy" @@ -28,3 +23,22 @@ variable "load_data" { default = false description = "If new data should be loaded when this terraform plan is applied. Clears the current database." 
} + +variable "env_vars" { + type = list(object({ + name = string + value = string + })) + description = "Key-value pairs of env vars to make available to the container" + default = [] +} + +variable "secrets" { + type = list(object({ + name = string + secret_name = string + secret_key = string + })) + description = "List of secrets to make available to the container" + default = [] +} diff --git a/infrastructure/kubernetes/modules/gcp/database/main.tf b/infrastructure/kubernetes/modules/gcp/database/main.tf new file mode 100644 index 0000000000..2bdac1cd6a --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/database/main.tf @@ -0,0 +1,115 @@ +resource "google_project_service" "secret_manager_api" { + service = "secretmanager.googleapis.com" + disable_on_destroy = false +} + +resource "google_secret_manager_secret" "postgresql_admin_secret" { + secret_id = "postgresql-admin-credentials-${var.namespace}" + + replication { + user_managed { + replicas { + location = var.region + } + } + } + + depends_on = [google_project_service.secret_manager_api] +} + +resource "google_secret_manager_secret_version" "postgresql_admin_secret_version" { + secret = google_secret_manager_secret.postgresql_admin_secret.id + + secret_data = jsonencode(local.postgres_secret_json) +} + +locals { + postgres_secret_json = { + username = "postgres" + admin_password = random_password.postgresql_admin_generator.result + } +} + +resource "random_password" "postgresql_admin_generator" { + length = 24 + special = true +} + +resource "helm_release" "postgres" { + name = "postgres" + repository = "https://charts.bitnami.com/bitnami" + chart = "postgresql" + version = "11.6.15" + namespace = var.namespace + + values = [ + file("${path.module}/values.yaml") + ] + + set { + name = "auth.username" + value = sensitive(var.username) + } + + set { + name = "auth.password" + value = sensitive(var.password) + } + + set { + name = "auth.database" + value = sensitive(var.database) + } + + set { + name = "auth.postgresPassword" + value = sensitive(local.postgres_secret_json.admin_password) + } + + set { + name = "image.repository" + value = "vizzuality/landgriffon-database" + } + + set { + name = "image.tag" + value = "latest" + } + + set { + name = "primary.persistence.size" + value = "60Gi" + } + + set { + name = "postgresql.podSecurityContext.enabled" + value = false + } + + set { + name = "primary.affinity" + type = "auto" + value = yamlencode({ + nodeAffinity = { + requiredDuringSchedulingIgnoredDuringExecution = { + nodeSelectorTerms = [ + { + matchExpressions = [{ + key = "type", + operator = "In", + values = ["default"] + }] + } + ] + } + } + }) + } +} + +data "kubernetes_service" "postgresql" { + metadata { + name = "postgres-postgresql" + namespace = var.namespace + } +} diff --git a/infrastructure/kubernetes/modules/gcp/database/values.yaml b/infrastructure/kubernetes/modules/gcp/database/values.yaml new file mode 100644 index 0000000000..f9538f7de3 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/database/values.yaml @@ -0,0 +1,1374 @@ +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value +## +global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.storageClass Global StorageClass for Persistent Volume(s) + ## + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). + ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry PostgreSQL image registry +## @param image.repository PostgreSQL image repository +## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should 
be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 14.4.0-debian-11-r7 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. + ## + existingSecret: "" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: false +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. 
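(Illustration only, not part of this diff: a minimal override file for the auth block above might look like the sketch below; the secret name is hypothetical and must already contain the keys listed under `auth.secretKeys`.)

auth:
  username: landgriffon
  database: landgriffon
  existingSecret: landgriffon-db-credentials  # hypothetical secret holding postgres-password, password and replication-password keys

Such a file could then be passed to the chart with, for example, `helm upgrade --install db bitnami/postgresql -f db-values.yaml`, assuming the Bitnami repository has been added locally.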
Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-outs operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## DEPRECATED ldap.url It will removed in a future, please use 'ldap.uri' instead +## @param ldap.server IP address or name of the LDAP server. +## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as string is deprecated,please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Se to true to enable TLS encryption +## +ldap: + enabled: false + server: "" + port: "" + prefix: "" + suffix: "" + basedn: "" + binddn: "" + bindpw: "" + searchAttribute: "" + searchFilter: "" + scheme: "" + tls: + enabled: false + ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
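(Illustration only: if LDAP search+bind authentication were wanted, the ldap block above could be overridden roughly as follows; the host, DNs and bind password are placeholders.)

ldap:
  enabled: true
  server: ldap.example.internal
  port: "389"
  basedn: ou=people,dc=example,dc=org
  binddn: cn=pgsql-bind,dc=example,dc=org
  bindpw: change-me  # placeholder; can instead come from auth.existingSecret via the ldap-password key
  searchAttribute: uid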
+ ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html + uri: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" + +## @section PostgreSQL Primary parameters +## +primary: + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir 
Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." + ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + 
## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations 
Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
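(Illustration only: a sketch of overriding the primary's resources and scheduling with the parameters above; the node label and toleration values are hypothetical.)

primary:
  resources:
    requests:
      cpu: 500m
      memory: 1Gi
    limits:
      memory: 2Gi
  nodeSelector:
    workload: database
  tolerations:
    - key: dedicated
      operator: Equal
      value: database
      effect: NoSchedule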
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## 
If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section PostgreSQL read only replica parameters +## +readReplicas: + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for 
livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only 
pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
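(Illustration only: a sketch of switching the chart to the replication architecture described earlier, with two read-only replicas and one synchronous standby.)

architecture: replication
auth:
  replicationPassword: change-me  # placeholder; normally sourced from auth.existingSecret
readReplicas:
  replicaCount: 2
replication:
  synchronousCommit: "on"
  numSynchronousReplicas: 1  # must not exceed readReplicas.replicaCount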
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param 
readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## PostgreSQL read only persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section NetworkPolicy parameters + +## Add networkpolicies +## +networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. + ## + metrics: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## Ingress Rules + ## + ingressRules: + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). 
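(Illustration only: a sketch restricting access to the PostgreSQL primary to pods carrying a given label, using the ingress rule parameters above; the label itself is hypothetical.)

networkPolicy:
  enabled: true
  ingressRules:
    primaryAccessOnlyFrom:
      enabled: true
      podSelector:
        app: api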
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node. + ## + primaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes. + ## + readReplicasAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## CustomRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + # Deny connections to external. This is not compatible with an external database. + denyConnectionsToExternal: false + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r12 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
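(Illustration only: enabling the init container described above, which changes ownership of the persistent volume mountpoint to `runAsUser:fsGroup`; this can help on storage classes that do not apply fsGroup themselves.)

volumePermissions:
  enabled: true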
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters + +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false + +## @section Metrics Parameters + +metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false + ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository + ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.10.1-debian-11-r12 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
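(Illustration only: creating a dedicated ServiceAccount with the parameters above; the name is hypothetical, and the token can be left un-mounted when the pods do not need the Kubernetes API.)

serviceAccount:
  create: true
  name: postgresql-sa
  automountServiceAccountToken: false
rbac:
  create: false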
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter 
containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
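(Illustration only: exposing the bundled Prometheus exporter and creating a ServiceMonitor for a Prometheus Operator deployment; the `release` label is a placeholder for whatever selector that operator is configured with.)

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    interval: 30s
    labels:
      release: prometheus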
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
+ ## + rules: [] diff --git a/infrastructure/kubernetes/modules/database/variable.tf b/infrastructure/kubernetes/modules/gcp/database/variable.tf similarity index 78% rename from infrastructure/kubernetes/modules/database/variable.tf rename to infrastructure/kubernetes/modules/gcp/database/variable.tf index de7251d7a2..4b6d037048 100644 --- a/infrastructure/kubernetes/modules/database/variable.tf +++ b/infrastructure/kubernetes/modules/gcp/database/variable.tf @@ -1,6 +1,6 @@ -variable "cluster_name" { +variable "region" { type = string - description = "The k8s cluster name" + description = "GCP region" } variable "namespace" { diff --git a/infrastructure/kubernetes/modules/gcp/database/versions.tf b/infrastructure/kubernetes/modules/gcp/database/versions.tf new file mode 100644 index 0000000000..9bd04dabf0 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/database/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.14.0" + } + + helm = { + source = "hashicorp/helm" + version = "~> 2.7.0" + } + } + required_version = "~> 1.3.2" +} diff --git a/infrastructure/kubernetes/modules/gcp/env/main.tf b/infrastructure/kubernetes/modules/gcp/env/main.tf new file mode 100644 index 0000000000..0153a2ff06 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/env/main.tf @@ -0,0 +1,271 @@ +module "k8s_namespace" { + source = "../../k8s_namespace" + namespace = var.environment +} + +module "k8s_database" { + source = "../database" + region = var.gcp_region + namespace = var.environment + username = module.k8s_secrets.postgres_username + password = module.k8s_secrets.postgres_password + database = module.k8s_secrets.postgres_database + + depends_on = [ + module.k8s_namespace + ] +} + +module "k8s_redis" { + source = "../redis" + region = var.gcp_region + namespace = var.environment + + depends_on = [ + module.k8s_namespace + ] +} + +module "k8s_api" { + source = "../../api" + deployment_name = "api" + image = "${var.api_container_registry_url}:${var.image_tag}" + + namespace = var.environment + + secrets = concat(var.api_secrets, [ + { + name = "DB_HOST" + secret_name = "db" + secret_key = "DB_HOST" + }, { + name = "DB_USERNAME" + secret_name = "db" + secret_key = "DB_USERNAME" + }, { + name = "DB_PASSWORD" + secret_name = "db" + secret_key = "DB_PASSWORD" + }, { + name = "DB_DATABASE" + secret_name = "db" + secret_key = "DB_DATABASE" + }, { + name = "QUEUE_HOST" + secret_name = "db" + secret_key = "REDIS_HOST" + }, { + name = "GEOCODING_CACHE_HOST" + secret_name = "db" + secret_key = "REDIS_HOST" + }, { + name = "DB_CACHE_HOST" + secret_name = "db" + secret_key = "REDIS_HOST" + }, { + name = "JWT_SECRET" + secret_name = "api" + secret_key = "JWT_SECRET" + }, { + name = "GMAPS_API_KEY" + secret_name = "api" + secret_key = "GMAPS_API_KEY" + } + ]) + + env_vars = concat(var.api_env_vars, [ + { + name = "PORT" + value = 3000 + }, + { + name = "JWT_EXPIRES_IN" + value = "2h" + }, + { + name = "DISTRIBUTED_MAP" + value = "true" + }, + { + name = "REQUIRE_USER_AUTH" + value = "true" + }, + { + name = "REQUIRE_USER_ACCOUNT_ACTIVATION" + value = "true" + }, + { + name = "USE_NEW_METHODOLOGY" + value = "true" + } + ]) + + depends_on = [ + module.k8s_namespace, + module.k8s_database + ] +} + +module "k8s_tiler" { + source = "../../tiler" + deployment_name = "tiler" + image = "${var.tiler_container_registry_url}:${var.image_tag}" + namespace = var.environment + + + env_vars = concat(var.tiler_env_vars, [ + { + name = 
"API_URL" + value = "${module.k8s_api.api_service_name}.${var.environment}.svc.cluster.local" + }, + { + name = "API_PORT" + // TODO: get port from api k8s service + value = 3000 + }, + { + name = "S3_BUCKET_NAME" + value = var.science_bucket_name + }, + { + name = "ROOT_PATH" + value = "" + }, + { + name = "TITILER_PREFIX" + value = "/tiler/cog" + }, + { + name = "TITILER_ROUTER_PREFIX" + value = "/tiler/cog" + }, + + { + name = "DEFAULT_COG" + value = "biomass.tif" + }, + { + name = "REQUIRE_AUTH" + value = "false" + } + + ]) + +} + +module "k8s_client" { + source = "../../client" + deployment_name = "client" + image = "${var.client_container_registry_url}:${var.image_tag}" + namespace = var.environment + site_url = module.k8s_ingress.client_url + api_url = module.k8s_ingress.api_url + + depends_on = [ + module.k8s_namespace + ] +} + +module "k8s_data_import" { + source = "../../data_import" + job_name = "data-import" + image = "${var.data_import_container_registry_url}:${var.image_tag}" + namespace = var.environment + load_data = var.load_fresh_data + arguments = var.data_import_arguments + + env_vars = [ + { + name = "API_POSTGRES_PORT" + value = "5432" + } + ] + + secrets = [ + { + name = "API_POSTGRES_HOST" + secret_name = "db" + secret_key = "DB_HOST" + }, { + name = "API_POSTGRES_USERNAME" + secret_name = "db" + secret_key = "DB_USERNAME" + }, { + name = "API_POSTGRES_PASSWORD" + secret_name = "db" + secret_key = "DB_PASSWORD" + }, { + name = "API_POSTGRES_DATABASE" + secret_name = "db" + secret_key = "DB_DATABASE" + }, { + name = "DATA_S3_ACCESS_KEY" + secret_name = "data" + secret_key = "AWS_ACCESS_KEY_ID" + }, { + name = "DATA_S3_SECRET_KEY" + secret_name = "data" + secret_key = "AWS_SECRET_ACCESS_KEY" + } + ] + + depends_on = [ + module.k8s_namespace, + module.k8s_database, + module.data-import-group + ] +} + +module "k8s_secrets" { + source = "../secrets" + region = var.gcp_region + tf_state_bucket = var.tf_state_bucket + namespace = var.environment + gmaps_api_key = var.gmaps_api_key + aws_access_key_id = var.aws_access_key_id + aws_secret_access_key = var.aws_secret_access_key + + depends_on = [ + module.k8s_namespace + ] +} + +module "k8s_ingress" { + source = "../ingress" + region = var.gcp_region + project = var.gcp_project + namespace = var.environment + domain = var.domain + depends_on = [module.k8s_namespace] +} + +module "data-import-group" { + count = var.load_fresh_data == true ? 1 : 0 + source = "../node_group" + + region = var.gcp_region + zone = var.gcp_zone + cluster_name = var.cluster_name + node_group_name = "data-import-node-group" + instance_type = "e2-standard-32" + min_size = 1 + max_size = 2 + namespace = var.environment + + labels = { + type : "data-import-${var.environment}" + } +} + +module "github_actions_frontend_secrets" { + source = "../../github_secrets" + repo_name = var.repo_name + branch = var.repo_branch + domain = var.domain +} + +#module "data_import" { +# source = "../../modules/fargate" +# namespace = var.environment +# postgresql_port = module.k8s_database.postgresql_service_port +#} diff --git a/infrastructure/kubernetes/modules/gcp/env/variables.tf b/infrastructure/kubernetes/modules/gcp/env/variables.tf new file mode 100644 index 0000000000..23c41898d4 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/env/variables.tf @@ -0,0 +1,146 @@ +variable "project_name" { + default = "landgriffon" + type = string + description = "A project namespace for the infrastructure." 
+} + +variable "environment" { + type = string + description = "An environment namespace for the infrastructure." +} + +variable "gcp_region" { + type = string + description = "A valid GCP region to configure the underlying GCP SDK." +} + +variable "gcp_project" { + type = string + description = "A valid GCP project id to configure the underlying GCP SDK." +} + +variable "gcp_zone" { + description = "A valid GCP zone to configure the underlying GCP SDK." + type = string +} + +variable "repo_name" { + type = string + description = "Name of the github repo where the project is hosted" +} + +variable "repo_branch" { + type = string + description = "The github branch to use" +} + +variable "cluster_name" { + type = string + description = "The k8s cluster name" +} + +variable "domain" { + type = string + description = "Domain where the app is publicly available" +} + +variable "private_subnet_ids" { + type = list(string) + description = "IDs of the subnets used in the EKS cluster" +} + +variable "image_tag" { + type = string + description = "The tag to use when pulling docker images" +} + +variable "tf_state_bucket" { + type = string + description = "The name of the S3 bucket where the state is stored" +} + +variable "allowed_account_id" { + type = string + description = "Allowed AWS Account ID" +} + +variable "gmaps_api_key" { + type = string + sensitive = true + description = "The Google Maps API key used for access to the geocoding API" +} + +variable "load_fresh_data" { + type = bool + default = false + description = "If a new data import should be triggered. Clears the current database." +} + +variable "data_import_arguments" { + type = list(string) + default = ["seed-data"] + description = "Arguments to pass to the initial data import process" +} + +variable "api_container_registry_url" { + type = string + description = "URL for the API container registry" +} + +variable "tiler_container_registry_url" { + type = string + description = "URL for the Tiler container registry" +} + +variable "client_container_registry_url" { + type = string + description = "URL for the client container registry" +} + +variable "aws_access_key_id" { + type = string + description = "AWS Access Key ID to read S3 data for data import" +} + +variable "aws_secret_access_key" { + type = string + description = "AWS Secret Access Key to read S3 data for data import" +} + +variable "science_bucket_name" { + type = string + description = "Name of the LG Science S3 Bucket" +} + +variable "data_import_container_registry_url" { + type = string + description = "URL for the data import container registry" +} + +variable "api_env_vars" { + type = list(object({ + name = string + value = string + })) + description = "Key-value pairs of env vars to make available to the api container" + default = [] +} + +variable "api_secrets" { + type = list(object({ + name = string + secret_name = string + secret_key = string + })) + description = "List of secrets to make available to the api container" + default = [] +} + +variable "tiler_env_vars" { + type = list(object({ + name = string + value = string + })) + description = "Key-value pairs of env vars to make available to the tiler container" + default = [] +} diff --git a/infrastructure/kubernetes/modules/gcp/env/versions.tf b/infrastructure/kubernetes/modules/gcp/env/versions.tf new file mode 100644 index 0000000000..9bd04dabf0 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/env/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_providers { + kubernetes = { + source = 
"hashicorp/kubernetes" + version = "~> 2.14.0" + } + + helm = { + source = "hashicorp/helm" + version = "~> 2.7.0" + } + } + required_version = "~> 1.3.2" +} diff --git a/infrastructure/kubernetes/modules/gcp/ingress/main.tf b/infrastructure/kubernetes/modules/gcp/ingress/main.tf new file mode 100644 index 0000000000..51343c38aa --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/ingress/main.tf @@ -0,0 +1,128 @@ +resource "google_compute_global_address" "gateway_static_ip" { + name = "cluster-ip" + address_type = "EXTERNAL" + project = var.project +} + +locals { + api_domain = "api.${var.namespace != "production" ? ("${var.namespace}.") : ""}${var.domain}" + client_domain = "client.${var.namespace != "production" ? ("${var.namespace}.") : ""}${var.domain}" +} + +resource "kubernetes_manifest" "http_to_https" { + manifest = { + apiVersion = "networking.gke.io/v1beta1" + kind = "FrontendConfig" + + metadata = { + name = "http-to-https" + namespace = var.namespace + } + + spec = { + redirectToHttps = { + enabled = true + responseCodeName = "MOVED_PERMANENTLY_DEFAULT" + } + } + } +} + +resource "kubernetes_manifest" "langriffon_managed_cert" { + manifest = { + apiVersion = "networking.gke.io/v1" + kind = "ManagedCertificate" + metadata = { + name = "langriffon-managed-cert" + namespace = var.namespace + } + spec = { + domains : [local.api_domain, local.client_domain] + } + } +} + +resource "kubernetes_ingress_v1" "gateway_ingress" { + metadata { + name = "gateway-ingress" + namespace = var.namespace + annotations = { + "kubernetes.io/ingress.global-static-ip-name" = google_compute_global_address.gateway_static_ip.name + "networking.gke.io/managed-certificates" = "langriffon-managed-cert" + "kubernetes.io/ingress.class" = "gce" + "networking.gke.io/v1beta1.FrontendConfig" = kubernetes_manifest.http_to_https.object.metadata.name + } + } + + spec { + rule { + host = local.api_domain + http { + path { + path_type = "Prefix" + path = "/tiler" + backend { + service { + name = "tiler" + port { + number = 4000 + } + } + } + } + } + } + + rule { + host = local.api_domain + http { + path { + backend { + service { + name = "api" + port { + number = 3000 + } + } + } + } + } + } + + rule { + host = local.client_domain + http { + path { + backend { + service { + name = "client" + port { + number = 3000 + } + } + } + } + } + } + } +} + +data "aws_route53_zone" "landgriffon-com" { + name = "landgriffon.com." 
+} + +resource "aws_route53_record" "api-landgriffon-com" { + zone_id = data.aws_route53_zone.landgriffon-com.zone_id + name = local.api_domain + type = "A" + ttl = "300" + records = [google_compute_global_address.gateway_static_ip.address] +} + +resource "aws_route53_record" "client-landgriffon-com" { + zone_id = data.aws_route53_zone.landgriffon-com.zone_id + name = local.client_domain + type = "A" + ttl = "300" + records = [google_compute_global_address.gateway_static_ip.address] +} diff --git a/infrastructure/kubernetes/modules/gcp/ingress/output.tf b/infrastructure/kubernetes/modules/gcp/ingress/output.tf new file mode 100644 index 0000000000..a9f62d5bd4 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/ingress/output.tf @@ -0,0 +1,7 @@ +output "api_url" { + value = local.api_domain +} + +output "client_url" { + value = local.client_domain +} diff --git a/infrastructure/kubernetes/modules/gcp/ingress/variables.tf b/infrastructure/kubernetes/modules/gcp/ingress/variables.tf new file mode 100644 index 0000000000..25dcdfeb8b --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/ingress/variables.tf @@ -0,0 +1,19 @@ +variable "project" { + description = "Name of the GCP project" + type = string +} + +variable "region" { + description = "A valid GCP region to configure the underlying GCP SDK." + type = string +} + +variable "domain" { + type = string + description = "Domain where the app is publicly available" +} + +variable "namespace" { + type = string + description = "The k8s namespace to use" +} diff --git a/infrastructure/kubernetes/modules/gcp/node_group/main.tf b/infrastructure/kubernetes/modules/gcp/node_group/main.tf new file mode 100644 index 0000000000..c693535908 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/node_group/main.tf @@ -0,0 +1,46 @@ +data "google_service_account" "eks-node-service-account" { + account_id = "eks-node-service-account" +} + +resource "random_id" "eks-node-group" { + keepers = { + instance_types = var.instance_type + namespace = var.namespace + } + byte_length = 8 +} + +resource "google_container_node_pool" "node_pool" { + name = "${var.node_group_name}-${random_id.eks-node-group.hex}" + location = var.zone + cluster = var.cluster_name + + management { + auto_repair = true + auto_upgrade = true + } + + autoscaling { + max_node_count = var.max_size + min_node_count = var.min_size + } + + node_config { + preemptible = false + machine_type = var.instance_type + + service_account = data.google_service_account.eks-node-service-account.email + + oauth_scopes = [ + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/sqlservice.admin", + ] + + labels = var.labels + } +} diff --git a/infrastructure/kubernetes/modules/gcp/node_group/outputs.tf b/infrastructure/kubernetes/modules/gcp/node_group/outputs.tf new file mode 100644 index 0000000000..d9228fb243 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/node_group/outputs.tf @@ -0,0 +1,4 @@ +output "node_group_name" { + value = google_container_node_pool.node_pool.name + description = "Node group name" +} diff --git a/infrastructure/kubernetes/modules/gcp/node_group/variable.tf b/infrastructure/kubernetes/modules/gcp/node_group/variable.tf new file mode 100644 index 
0000000000..88db6b39b3 --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/node_group/variable.tf @@ -0,0 +1,48 @@ +variable "node_group_name" { + type = string + description = "Name of the node group" +} + +variable "cluster_name" { + type = string + description = "Name of the EKS cluster to which this node group will be attached" +} + +variable "region" { + type = string + description = "GCP region" +} + +variable "zone" { + description = "A valid GCP zone to configure the underlying GCP SDK." + type = string +} + +variable "min_size" { + type = number + default = 1 + description = "Minimum number of nodes in the group" +} + +variable "max_size" { + type = number + default = 1 + description = "Maximum number of nodes in the group" +} + +variable "instance_type" { + type = string + description = "Name of the Compute Instance type to use" + default = "e2-standard-32" +} + +variable "namespace" { + type = string + description = "The k8s namespace to use" +} + +variable "labels" { + type = map(string) + default = {} + description = "Labels to apply to nodes" +} diff --git a/infrastructure/kubernetes/modules/gcp/redis/main.tf b/infrastructure/kubernetes/modules/gcp/redis/main.tf new file mode 100644 index 0000000000..2f7a75a3bb --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/redis/main.tf @@ -0,0 +1,70 @@ +resource "google_project_service" "secret_manager_api" { + service = "secretmanager.googleapis.com" + disable_on_destroy = false +} + +locals { + redis_secret_json = { + username = "redis" + password = random_password.redis_admin_generator.result + } +} + +resource "random_password" "redis_admin_generator" { + length = 24 + special = true +} + +resource "google_secret_manager_secret" "redis_admin_secret" { + secret_id = "redis-admin-credentials-${var.namespace}" + + replication { + user_managed { + replicas { + location = var.region + } + } + } + + depends_on = [google_project_service.secret_manager_api] +} + +resource "google_secret_manager_secret_version" "redis_admin_secret_version" { + secret = google_secret_manager_secret.redis_admin_secret.id + + secret_data = jsonencode(local.redis_secret_json) +} + + +resource "helm_release" "redis" { + name = "redis" + repository = "https://charts.bitnami.com/bitnami" + chart = "redis" + version = "16.13.2" + namespace = var.namespace + + values = [ + file("${path.module}/values.yaml") + ] + + set { + name = "auth.existingSecretPasswordKey" + value = "redis-password" + } + + set { + name = "auth.existingSecret" + value = "redis-secret" + } +} + +resource "kubernetes_secret" "redis-secret" { + metadata { + name = "redis-secret" + namespace = var.namespace + } + + data = { + redis-password = sensitive(local.redis_secret_json.password) + } +} diff --git a/infrastructure/kubernetes/modules/gcp/redis/outputs.tf b/infrastructure/kubernetes/modules/gcp/redis/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/infrastructure/kubernetes/modules/gcp/redis/values.yaml b/infrastructure/kubernetes/modules/gcp/redis/values.yaml new file mode 100644 index 0000000000..93fe9c151f --- /dev/null +++ b/infrastructure/kubernetes/modules/gcp/redis/values.yaml @@ -0,0 +1,1387 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## 
@param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis™ password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + redis: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Redis™ Image parameters +## + +## Bitnami Redis™ image +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## @param image.registry Redis™ image registry +## @param image.repository Redis™ image repository +## @param image.tag Redis™ image tag (immutable tags are recommended) +## @param image.pullPolicy Redis™ image pull policy +## @param image.pullSecrets Redis™ image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/redis + tag: 6.2.6-debian-10-r21 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section Redis™ common configuration parameters +## https://github.com/bitnami/bitnami-docker-redis#configuration +## + +## @param architecture Redis™ architecture. 
Allowed values: `standalone` or `replication` +## +architecture: standalone +## Redis™ Authentication parameters +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: false + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis™ password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis™ credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis™ nodes +## +existingConfigmap: "" + +## @section Redis™ master configuration parameters +## + +master: + ## @param master.configuration Configuration for Redis™ master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands [array] Array with Redis™ commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + ## disableCommands: + ## - FLUSHDB + ## - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.preExecCmds Additional commands to run prior to starting Redis™ master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis™ master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis™ master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPort Container port to open on Redis™ master nodes + ## + containerPort: 6379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis™ master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis™ master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis™ master containers + ## @param master.resources.requests The requested resources for the Redis™ master containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Configure Pods Security Context + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis™ master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis™ master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis™ master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis™ master containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param master.schedulerName Alternate scheduler for Redis™ master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis™ master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param master.priorityClassName Redis™ master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis™ master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for Redis™ master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis™ master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis™ master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: type + operator: In + values: + - default + ## @param master.nodeSelector Node labels for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis™ master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.spreadConstraints Spread Constraints for Redis™ master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## spreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + spreadConstraints: {} + ## @param master.lifecycleHooks for the Redis™ master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis™ master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis™ master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis™ master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis™ master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis™ master containers + ## NOTE: Useful when using different Redis™ images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis™ master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes [array] Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## Redis™ master service parameters + ## + service: + ## @param master.service.type Redis™ master service type + ## + type: ClusterIP + ## @param master.service.port Redis™ master service port + ## + port: 6379 + ## @param master.service.nodePort Node port for Redis™ master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePort: "" + ## @param master.service.externalTrafficPolicy Redis™ master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.clusterIP Redis™ master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis™ master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis™ master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.annotations Additional custom annotations for Redis™ master service + ## + annotations: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + +## @section Redis™ replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis™ replicas to deploy + ## + replicaCount: 0 + ## @param replica.configuration Configuration for Redis™ replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands [array] Array with Redis™ commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.preExecCmds Additional commands to run prior to starting Redis™ replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis™ replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis™ replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.containerPort Container port to open on Redis™ replicas nodes + ## + containerPort: 6379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis™ replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis™ replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ replicas resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis™ replicas containers + ## @param replica.resources.requests The requested resources for the Redis™ replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.podSecurityContext.enabled Enabled Redis™ replicas pods' Security Context + ## @param replica.podSecurityContext.fsGroup Set Redis™ replicas pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.containerSecurityContext.enabled Enabled Redis™ replicas containers' Security Context + ## @param replica.containerSecurityContext.runAsUser Set Redis™ replicas containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param replica.schedulerName Alternate scheduler for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param replica.updateStrategy.type Redis™ replicas statefulset strategy type + ## @skip replica.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param replica.priorityClassName Redis™ replicas pods' priorityClassName + ## + priorityClassName: "" + ## @param replica.hostAliases Redis™ replicas pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param replica.podLabels Extra labels for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param replica.podAnnotations Annotations for Redis™ replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis™ replicas pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis™ replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.spreadConstraints Spread Constraints for Redis™ replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## spreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + spreadConstraints: {} + ## @param replica.lifecycleHooks for the Redis™ replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis™ replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the Redis™ replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the Redis™ replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis™ replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## @param replica.persistence.path The path the volume will be mounted at on Redis™ replicas containers + ## NOTE: Useful when using different Redis™ images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis™ replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes [array] Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + dataSource: {} + ## Redis™ replicas service parameters + ## + service: + ## @param replica.service.type Redis™ replicas service type + ## + type: ClusterIP + ## @param replica.service.port Redis™ replicas service port + ## + port: 6379 + ## @param replica.service.nodePort Node port for Redis™ replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePort: "" + ## @param replica.service.externalTrafficPolicy Redis™ replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.clusterIP Redis™ replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis™ replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis™ replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis™ replicas service + ## + annotations: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + +## @section Redis™ Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis™ Sentinel on Redis™ pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis™ service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis™ Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry Redis™ Sentinel image registry + ## @param sentinel.image.repository Redis™ Sentinel image repository + ## @param sentinel.image.tag Redis™ Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.pullPolicy Redis™ Sentinel image pull policy + ## @param sentinel.image.pullSecrets Redis™ Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + tag: 6.2.6-debian-10-r20 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. 
+ ## + automateClusterRecovery: false + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis™ node is down + ## @param sentinel.failoverTimeout Timeout for performing a election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for Redis™ Sentinel nodes + ## ref: https://redis.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis™ Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis™ Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis™ Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis™ Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.containerPort Container port to open on Redis™ Sentinel nodes + ## + containerPort: 26379 + ## Configure extra options for Redis™ containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis™ Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis™ Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis™ Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis™ Sentinel containers + 
## @param sentinel.resources.requests The requested resources for the Redis™ Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis™ Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis™ Sentinel containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param sentinel.lifecycleHooks for the Redis™ sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis™ Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis™ Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis™ Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.port Redis™ service port for Redis™ + ## + port: 6379 + ## @param sentinel.service.sentinelPort Redis™ service port for Sentinel + ## + sentinelPort: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis™ + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis™ Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.clusterIP Redis™ Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis™ Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis™ Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis™ Sentinel service + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis™ is listening on. When true, Redis™ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+  ## If not set and create is true, a name is generated using the common.names.fullname template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+## Redis™ Pod Disruption Budget configuration
+## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+##
+pdb:
+  ## @param pdb.create Specifies whether a PodDisruptionBudget should be created
+  ##
+  create: false
+  ## @param pdb.minAvailable Min number of pods that must still be available after the eviction
+  ##
+  minAvailable: 1
+  ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction
+  ##
+  maxUnavailable: ""
+## TLS configuration
+##
+tls:
+  ## @param tls.enabled Enable TLS traffic
+  ##
+  enabled: false
+  ## @param tls.authClients Require clients to authenticate
+  ##
+  authClients: true
+  ## @param tls.autoGenerated Enable autogenerated certificates
+  ##
+  autoGenerated: false
+  ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates
+  ##
+  existingSecret: ""
+  ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead.
+  ##
+  certificatesSecret: ""
+  ## @param tls.certFilename Certificate filename
+  ##
+  certFilename: ""
+  ## @param tls.certKeyFilename Certificate Key filename
+  ##
+  certKeyFilename: ""
+  ## @param tls.certCAFilename CA Certificate filename
+  ##
+  certCAFilename: ""
+  ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers)
+  ##
+  dhParamsFilename: ""
+
+## @section Metrics Parameters
+##
+
+metrics:
+  ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis™ metrics
+  ##
+  enabled: false
+  ## Bitnami Redis™ Exporter image
+  ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/
+  ## @param metrics.image.registry Redis™ Exporter image registry
+  ## @param metrics.image.repository Redis™ Exporter image repository
+  ## @param metrics.image.tag Redis™ Exporter image tag (immutable tags are recommended)
+  ## @param metrics.image.pullPolicy Redis™ Exporter image pull policy
+  ## @param metrics.image.pullSecrets Redis™ Exporter image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/redis-exporter
+    tag: 1.29.0-debian-10-r6
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## @param metrics.redisTargetHost A way to specify an alternative Redis™ hostname
+  ## Useful for certificate CN/SAN matching
+  ##
+  redisTargetHost: "localhost"
+  ## @param metrics.extraArgs Extra arguments for Redis™ exporter, for example:
+  ## e.g.:
+  ## extraArgs:
+  ##   check-keys: myKey,myOtherKey
+  ##
+  extraArgs: {}
+  ## Configure Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+  ## @param metrics.containerSecurityContext.enabled Enabled Redis™ exporter containers' Security Context
+  ## @param metrics.containerSecurityContext.runAsUser Set Redis™ exporter containers' Security Context runAsUser
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 1001
+  ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis™ metrics sidecar
+  ##
+  extraVolumes: []
+  ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis™ metrics sidecar
+  ##
+  extraVolumeMounts: []
+  ## Redis™ exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param metrics.resources.limits The resources limits for the Redis™ exporter container
+  ## @param metrics.resources.requests The requested resources for the Redis™ exporter container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## @param metrics.podLabels Extra labels for Redis™ exporter pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  ##
+  podLabels: {}
+  ## @param metrics.podAnnotations [object] Annotations for Redis™ exporter pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9121"
+  ## Redis™ exporter service parameters
+  ##
+  service:
+    ## @param metrics.service.type Redis™ exporter service type
+    ##
+    type: ClusterIP
+    ## @param metrics.service.port Redis™ exporter service port
+    ##
+    port: 9121
+    ## @param metrics.service.externalTrafficPolicy Redis™ exporter service external traffic policy
+    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+    ##
+    externalTrafficPolicy: Cluster
+    ## @param metrics.service.loadBalancerIP Redis™ exporter service Load Balancer IP
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    loadBalancerIP: ""
+    ## @param metrics.service.loadBalancerSourceRanges Redis™ exporter service Load Balancer sources
+    ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+    ## e.g.
+    ## loadBalancerSourceRanges:
+    ##   - 10.10.10.0/24
+    ##
+    loadBalancerSourceRanges: []
+    ## @param metrics.service.annotations Additional custom annotations for Redis™ exporter service
+    ##
+    annotations: {}
+  ## Prometheus Service Monitor
+  ## ref: https://github.com/coreos/prometheus-operator
+  ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped
+    ##
+    interval: 30s
+    ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping.
+    ##
+    relabellings: []
+    ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion.
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
+    ##
+    additionalLabels: {}
+  ## Custom PrometheusRule to be defined
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules Custom Prometheus rules
+    ## e.g:
+    ## rules:
+    ##   - alert: RedisDown
+    ##     expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0
+    ##     for: 2m
+    ##     labels:
+    ##       severity: error
+    ##     annotations:
+    ##       summary: Redis™ instance {{ "{{ $labels.instance }}" }} down
+    ##       description: Redis™ instance {{ "{{ $labels.instance }}" }} is down
+    ##   - alert: RedisMemoryHigh
+    ##     expr: >
+    ##       redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100
+    ##       /
+    ##       redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"}
+    ##       > 90
+    ##     for: 2m
+    ##     labels:
+    ##       severity: error
+    ##     annotations:
+    ##       summary: Redis™ instance {{ "{{ $labels.instance }}" }} is using too much memory
+    ##       description: |
+    ##         Redis™ instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+    ##   - alert: RedisKeyEviction
+    ##     expr: |
+    ##       increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0
+    ##     for: 1s
+    ##     labels:
+    ##       severity: error
+    ##     annotations:
+    ##       summary: Redis™ instance {{ "{{ $labels.instance }}" }} has evicted keys
+    ##       description: |
+    ##         Redis™ instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+    ##
+    rules: []
+
+## @section Init Container Parameters
+##
+
+## 'volumePermissions' init container parameters
+## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
+## based on the *podSecurityContext/*containerSecurityContext parameters
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
+  ##
+  enabled: false
+  ## Bitnami Shell image
+  ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
+  ## @param volumePermissions.image.registry Bitnami Shell image registry
+  ## @param volumePermissions.image.repository Bitnami Shell image repository
+  ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy
+  ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 10-debian-10-r232
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits The resources limits for the init container
+  ## @param volumePermissions.resources.requests The requested resources for the init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container Container Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+  ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
+  ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
+  ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## init-sysctl container parameters
+## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings)
+##
+sysctl:
+  ## @param sysctl.enabled Enable init container to modify Kernel settings
+  ##
+  enabled: false
+  ## Bitnami Shell image
+  ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/
+  ## @param sysctl.image.registry Bitnami Shell image registry
+  ## @param sysctl.image.repository Bitnami Shell image repository
+  ## @param sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended)
+  ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy
+  ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 10-debian-10-r232
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## e.g:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## @param sysctl.command Override default init-sysctl container command (useful when using custom images)
+  ##
+  command: []
+  ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys`
+  ##
+  mountHostSys: false
+  ## Init container's resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param sysctl.resources.limits The resources limits for the init container
+  ## @param sysctl.resources.requests The requested resources for the init container
+  ##
+  resources:
+    limits: {}
+    requests: {}
diff --git a/infrastructure/kubernetes/modules/redis/variable.tf b/infrastructure/kubernetes/modules/gcp/redis/variable.tf
similarity index 64%
rename from infrastructure/kubernetes/modules/redis/variable.tf
rename to infrastructure/kubernetes/modules/gcp/redis/variable.tf
index e64d61db7b..3953b03bff 100644
--- a/infrastructure/kubernetes/modules/redis/variable.tf
+++ b/infrastructure/kubernetes/modules/gcp/redis/variable.tf
@@ -1,9 +1,9 @@
-variable "cluster_name" {
+variable "namespace" {
   type        = string
-  description = "The k8s cluster name"
+  description = "The k8s namespace to use"
 }
 
-variable "namespace" {
+variable "region" {
   type        = string
-  description = "The k8s namespace to use"
+  description = "GCP region"
 }
diff --git a/infrastructure/kubernetes/modules/gcp/redis/versions.tf b/infrastructure/kubernetes/modules/gcp/redis/versions.tf
new file mode 100644
index 0000000000..9bd04dabf0
--- /dev/null
+++ b/infrastructure/kubernetes/modules/gcp/redis/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.14.0"
+    }
+
+    helm = {
+      source  = "hashicorp/helm"
+      version = "~> 2.7.0"
+    }
+  }
+  required_version = "~> 1.3.2"
+}
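For orientation, the renamed gcp/redis module above only declares namespace and region inputs plus kubernetes/helm provider requirements. A root-module call might look roughly like the sketch below; the module path, the literal namespace value and the use of the gcp_* provider aliases (added further down in versions.tf) are assumptions, not taken verbatim from this diff.

# Hypothetical root-module wiring for the gcp/redis module (sketch only).
module "gcp_redis" {
  source    = "./modules/gcp/redis" # assumed path
  namespace = "gcp"                 # assumed; mirrors the gcp environment in terraform.tfvars
  region    = var.gcp_region

  providers = {
    kubernetes = kubernetes.gcp_kubernetes
    helm       = helm.gcp_helm
  }
}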
diff --git a/infrastructure/kubernetes/modules/gcp/secrets/main.tf b/infrastructure/kubernetes/modules/gcp/secrets/main.tf
new file mode 100644
index 0000000000..c6c423af5c
--- /dev/null
+++ b/infrastructure/kubernetes/modules/gcp/secrets/main.tf
@@ -0,0 +1,127 @@
+resource "google_project_service" "secret_manager_api" {
+  service            = "secretmanager.googleapis.com"
+  disable_on_destroy = false
+}
+
+resource "google_secret_manager_secret" "api_secret" {
+  secret_id = "api-secret-${var.namespace}"
+
+  replication {
+    user_managed {
+      replicas {
+        location = var.region
+      }
+    }
+  }
+
+  depends_on = [google_project_service.secret_manager_api]
+}
+
+resource "google_secret_manager_secret_version" "api_secret_version" {
+  secret = google_secret_manager_secret.api_secret.id
+
+  secret_data = jsonencode(local.api_secret_json)
+}
+
+locals {
+  postgres_secret_json = {
+    username = "landgriffon-${var.namespace}"
+    password = random_password.postgresql_user_generator.result
+    database = "landgriffon-${var.namespace}"
+  }
+
+  api_secret_json = {
+    jwt_secret    = random_password.jwt_secret_generator.result
+    gmaps_api_key = var.gmaps_api_key
+  }
+}
+
+# JWT
+resource "random_password" "jwt_secret_generator" {
+  length  = 64
+  special = true
+}
+
+resource "kubernetes_secret" "api_secret" {
+  metadata {
+    name      = "api"
+    namespace = var.namespace
+  }
+
+  data = {
+    JWT_SECRET    = local.api_secret_json.jwt_secret
+    GMAPS_API_KEY = local.api_secret_json.gmaps_api_key
+  }
+}
+
+#Postgres
+resource "random_password" "postgresql_user_generator" {
+  length  = 24
+  special = true
+  lifecycle {
+    ignore_changes = [
+      length,
+      lower,
+      min_lower,
+      min_numeric,
+      min_special,
+      min_upper,
+      numeric,
+      special,
+      upper,
+
+    ]
+  }
+}
+
+resource "google_secret_manager_secret" "postgres_user_secret" {
+  secret_id = "landgriffon-${var.namespace}-postgresql-user-password"
+
+  replication {
+    user_managed {
+      replicas {
+        location = var.region
+      }
+    }
+  }
+
+  depends_on = [google_project_service.secret_manager_api]
+}
+
+resource "google_secret_manager_secret_version" "postgres_user_secret_version" {
+  secret = google_secret_manager_secret.postgres_user_secret.id
+
+  secret_data = jsonencode(local.postgres_secret_json)
+}
+
+resource "kubernetes_secret" "db_secret" {
+  metadata {
+    name      = "db"
+    namespace = var.namespace
+  }
+
+  data = {
+    DB_HOST     = "postgres-postgresql.${var.namespace}.svc.cluster.local"
+    DB_USERNAME = sensitive(local.postgres_secret_json.username)
+    DB_PASSWORD = sensitive(local.postgres_secret_json.password)
+    DB_DATABASE = sensitive(local.postgres_secret_json.database)
+    REDIS_HOST  = "redis-master.${var.namespace}.svc.cluster.local"
+  }
+}
+
+
+resource "kubernetes_secret" "data_secret" {
+  metadata {
+    name      = "data"
+    namespace = var.namespace
+  }
+
+  data = {
+    AWS_ACCESS_KEY_ID     = var.aws_access_key_id
+    AWS_SECRET_ACCESS_KEY = var.aws_secret_access_key
+  }
+}
+
+
+
diff --git a/infrastructure/kubernetes/modules/gcp/secrets/outputs.tf b/infrastructure/kubernetes/modules/gcp/secrets/outputs.tf
new file mode 100644
index 0000000000..f4e97e74bd
--- /dev/null
+++ b/infrastructure/kubernetes/modules/gcp/secrets/outputs.tf
@@ -0,0 +1,11 @@
+output "postgres_username" {
+  value = local.postgres_secret_json.username
+}
+
+output "postgres_password" {
+  value = local.postgres_secret_json.password
+}
+
+output "postgres_database" {
+  value = local.postgres_secret_json.database
+}
diff --git a/infrastructure/kubernetes/modules/gcp/secrets/variable.tf b/infrastructure/kubernetes/modules/gcp/secrets/variable.tf
new file mode 100644
index 0000000000..3381abdc82
--- /dev/null
+++ b/infrastructure/kubernetes/modules/gcp/secrets/variable.tf
@@ -0,0 +1,32 @@
+variable "namespace" {
+  type        = string
+  description = "The k8s namespace to use"
+}
+
+variable "tf_state_bucket" {
+  type        = string
+  description = "The name of the S3 bucket where the state is stored"
+}
+
+variable "gmaps_api_key" {
+  type        = string
+  sensitive   = true
+  description = "The Google Maps API key used for access to the geocoding API"
+}
+
+variable "region" {
+  type        = string
+  description = "GCP region"
+}
+
+variable "aws_access_key_id" {
+  type        = string
+  description = "AWS access key id to read data from the science S3 bucket"
+  sensitive   = true
+}
+
+variable "aws_secret_access_key" {
+  type        = string
+  description = "AWS secret access key to read data from the science S3 bucket"
+  sensitive   = true
+}
diff --git a/infrastructure/kubernetes/modules/gcp/secrets/versions.tf b/infrastructure/kubernetes/modules/gcp/secrets/versions.tf
new file mode 100644
index 0000000000..c5cc49e237
--- /dev/null
+++ b/infrastructure/kubernetes/modules/gcp/secrets/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.14.0"
+    }
+
+    random = {
+      source  = "hashicorp/random"
+      version = "~> 3.4.3"
+    }
+  }
+  required_version = "~> 1.3.2"
+}
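The gcp/secrets module above generates the JWT secret and Postgres credentials, stores them in Secret Manager and mirrors them into Kubernetes Secrets, and exposes the Postgres values as outputs. Calling it from the root module could look roughly like the sketch below; the module path, environment values, root-level variable names and the providers block are assumptions, not taken from this diff.

# Hypothetical wiring of the gcp/secrets module (sketch only).
module "gcp_secrets" {
  source                = "./modules/gcp/secrets" # assumed path
  namespace             = "gcp"                   # assumed
  region                = var.gcp_region
  tf_state_bucket       = var.tf_state_bucket
  gmaps_api_key         = var.gmaps_api_key         # assumed root-level variable
  aws_access_key_id     = var.aws_access_key_id     # assumed root-level variable
  aws_secret_access_key = var.aws_secret_access_key # assumed root-level variable

  providers = {
    kubernetes = kubernetes.gcp_kubernetes
  }
}

# The generated Postgres credentials would then be available as
# module.gcp_secrets.postgres_username / postgres_password / postgres_database.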
diff --git a/infrastructure/kubernetes/modules/k8s_infrastructure/main.tf b/infrastructure/kubernetes/modules/k8s_infrastructure/main.tf
index fc086db406..2d0266bd0e 100644
--- a/infrastructure/kubernetes/modules/k8s_infrastructure/main.tf
+++ b/infrastructure/kubernetes/modules/k8s_infrastructure/main.tf
@@ -1,7 +1,3 @@
-data "aws_eks_cluster_auth" "cluster" {
-  name = var.cluster_name
-}
-
 // https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html
 // AWS Cluster autoscaler
 // File has changes - see link above for details
diff --git a/infrastructure/kubernetes/modules/k8s_infrastructure/versions.tf b/infrastructure/kubernetes/modules/k8s_infrastructure/versions.tf
index b7ac17aa3d..027758df86 100644
--- a/infrastructure/kubernetes/modules/k8s_infrastructure/versions.tf
+++ b/infrastructure/kubernetes/modules/k8s_infrastructure/versions.tf
@@ -14,6 +14,11 @@ terraform {
       source  = "hashicorp/helm"
       version = "~> 2.7.0"
     }
+
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = "~> 2.14.0"
+    }
   }
   required_version = "~> 1.3.2"
 }
diff --git a/infrastructure/kubernetes/modules/k8s_namespace/main.tf b/infrastructure/kubernetes/modules/k8s_namespace/main.tf
index 8e536b0b4d..579108edbc 100644
--- a/infrastructure/kubernetes/modules/k8s_namespace/main.tf
+++ b/infrastructure/kubernetes/modules/k8s_namespace/main.tf
@@ -1,7 +1,3 @@
-data "aws_eks_cluster_auth" "cluster" {
-  name = var.cluster_name
-}
-
 resource "kubernetes_namespace" "namespace" {
   metadata {
     name = var.namespace
diff --git a/infrastructure/kubernetes/modules/k8s_namespace/variables.tf b/infrastructure/kubernetes/modules/k8s_namespace/variables.tf
index b63573bacd..97feb70cab 100644
--- a/infrastructure/kubernetes/modules/k8s_namespace/variables.tf
+++ b/infrastructure/kubernetes/modules/k8s_namespace/variables.tf
@@ -1,8 +1,3 @@
-variable "cluster_name" {
-  type        = string
-  description = "The k8s cluster name"
-}
-
 variable "namespace" {
   description = "Namespace name"
   type        = string
diff --git a/infrastructure/kubernetes/modules/k8s_namespace/versions.tf b/infrastructure/kubernetes/modules/k8s_namespace/versions.tf
index 75d7f74e95..94cc74fac3 100644
--- a/infrastructure/kubernetes/modules/k8s_namespace/versions.tf
+++ b/infrastructure/kubernetes/modules/k8s_namespace/versions.tf
@@ -4,10 +4,6 @@ terraform {
       source  = "hashicorp/kubernetes"
       version = "~> 2.14.0"
     }
-    aws = {
-      source  = "hashicorp/aws"
-      version = "~> 4.34.0"
-    }
   }
   required_version = "~> 1.3.2"
 }
diff --git a/infrastructure/kubernetes/modules/tiler/main.tf b/infrastructure/kubernetes/modules/tiler/main.tf
index 5814771448..087de9c0e0 100644
--- a/infrastructure/kubernetes/modules/tiler/main.tf
+++ b/infrastructure/kubernetes/modules/tiler/main.tf
@@ -1,7 +1,3 @@
-data "aws_eks_cluster_auth" "cluster" {
-  name = var.cluster_name
-}
-
 resource "kubernetes_service" "tiler_service" {
   metadata {
     name = kubernetes_deployment.tiler_deployment.metadata[0].name
diff --git a/infrastructure/kubernetes/modules/tiler/variable.tf b/infrastructure/kubernetes/modules/tiler/variable.tf
index c70315eede..6966ccbcd6 100644
--- a/infrastructure/kubernetes/modules/tiler/variable.tf
+++ b/infrastructure/kubernetes/modules/tiler/variable.tf
@@ -1,8 +1,3 @@
-variable "cluster_name" {
-  type        = string
-  description = "The k8s cluster name"
-}
-
 variable "image" {
   type        = string
   description = "The dockerhub image reference to deploy"
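The root variables.tf changes that follow introduce gcp_region, gcp_project_id and gcp_zone, which feed the google provider configured further down in versions.tf. They would be supplied alongside the existing tfvars, for example as in the sketch below; the values are placeholders (only the europe-west1 region is implied by the Artifact Registry host used in the workflow, and the project id and zone are assumptions).

# Hypothetical additions to a tfvars file (placeholder values).
gcp_region     = "europe-west1"
gcp_project_id = "my-gcp-project"
gcp_zone       = "europe-west1-b"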
diff --git a/infrastructure/kubernetes/variables.tf b/infrastructure/kubernetes/variables.tf
index 7c9cbecebe..407fec7b31 100644
--- a/infrastructure/kubernetes/variables.tf
+++ b/infrastructure/kubernetes/variables.tf
@@ -20,6 +20,21 @@ variable "aws_region" {
   description = "A valid AWS region to configure the underlying AWS SDK."
 }
 
+variable "gcp_region" {
+  type        = string
+  description = "A valid GCP region to configure the underlying GCP SDK."
+}
+
+variable "gcp_project_id" {
+  type        = string
+  description = "A valid GCP project id to configure the underlying GCP SDK."
+}
+
+variable "gcp_zone" {
+  description = "A valid GCP zone to configure the underlying GCP SDK."
+  type        = string
+}
+
 variable "tf_state_bucket" {
   type        = string
   description = "The name of the S3 bucket where the state is stored"
@@ -46,6 +61,10 @@ variable "mapbox_api_token" {
   description = "Token to access the Mapbox API"
 }
 
-variable "environments" {
-  description = "A list of environments"
+variable "aws_environments" {
+  description = "A list of AWS environments"
+}
+
+variable "gcp_environments" {
+  description = "A list of GCP environments"
 }
diff --git a/infrastructure/kubernetes/vars/terraform.tfvars b/infrastructure/kubernetes/vars/terraform.tfvars
index b240de7174..7b8eb44294 100644
--- a/infrastructure/kubernetes/vars/terraform.tfvars
+++ b/infrastructure/kubernetes/vars/terraform.tfvars
@@ -4,10 +4,15 @@ allowed_account_id = "622152552144"
 domain    = "landgriffon.com"
 repo_name = "landgriffon"
 
-environments = {
+aws_environments = {
   dev : {},
   test : {},
-  tetrapack : {
-  },
+  tetrapack : {},
   demo : {}
 }
+
+gcp_environments = {
+  gcp : {
+    load_fresh_data : true
+  },
+}
diff --git a/infrastructure/kubernetes/versions.tf b/infrastructure/kubernetes/versions.tf
index 57941db436..8debd954c9 100644
--- a/infrastructure/kubernetes/versions.tf
+++ b/infrastructure/kubernetes/versions.tf
@@ -33,6 +33,11 @@ terraform {
       source  = "integrations/github"
       version = "5.17.0"
     }
+
+    google = {
+      source  = "hashicorp/google"
+      version = "4.51.0"
+    }
   }
   required_version = "~> 1.3.2"
 }
@@ -42,17 +47,31 @@ provider "aws" {
   allowed_account_ids = [var.allowed_account_id]
 }
 
+provider "google" {
+  region  = var.gcp_region
+  project = var.gcp_project_id
+}
+
+provider "kubernetes" {
+  alias          = "aws_kubernetes"
+  config_path    = "~/.kube/config"
+  config_context = "aws_landgriffon"
+}
+
 provider "kubernetes" {
-  config_path = "~/.kube/config"
+  alias          = "gcp_kubernetes"
+  config_path    = "~/.kube/config"
+  config_context = "gcp_landgriffon"
 }
 
 provider "helm" {
+  alias = "aws_helm"
   kubernetes {
     host                   = "${data.aws_eks_cluster.cluster.endpoint}:4433"
     cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
     exec {
       api_version = "client.authentication.k8s.io/v1beta1"
-      args = [
+      args = [
        "eks",
        "get-token",
        "--cluster-name",
@@ -63,7 +82,46 @@
   }
 }
 
+data "google_client_config" "default" {}
+
+provider "helm" {
+  alias = "gcp_helm"
+  kubernetes {
+    host                   = "https://${data.google_container_cluster.cluster.endpoint}"
+    token                  = data.google_client_config.default.access_token
+    cluster_ca_certificate = base64decode(data.google_container_cluster.cluster.master_auth[0].cluster_ca_certificate)
+  }
+}
+
 # https://github.com/integrations/terraform-provider-github/issues/667#issuecomment-1182340862
 provider "github" {
   # owner = "vizzuality"
 }
+
+provider "kubectl" {
+  alias = "aws_kubectl"
+
+  host                   = "${data.aws_eks_cluster.cluster.endpoint}:4433"
+  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
+  exec {
+    api_version = "client.authentication.k8s.io/v1beta1"
+    args = [
+      "eks",
+      "get-token",
+      "--cluster-name",
+      data.terraform_remote_state.core.outputs.eks_cluster_name
+    ]
+    command = "aws"
+  }
+}
+
+provider "kubectl" {
+  alias = "gcp_kubectl"
+
+  kubernetes {
+    host                   = data.google_container_cluster.cluster.endpoint
+    token                  = data.google_client_config.default.access_token
+    cluster_ca_certificate = base64decode(data.google_container_cluster.cluster.master_auth[0].cluster_ca_certificate)
+
+  }
+}
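Because the default (unaliased) kubernetes and helm providers are replaced here by aws_*/gcp_* aliased ones, module calls now have to select a provider configuration explicitly via a providers map. A minimal sketch for the existing k8s_namespace module is below; the module name and the namespace value are illustrative, not taken from this diff.

# Hypothetical example of selecting an aliased provider for a module call.
module "gcp_namespace" {
  source    = "./modules/k8s_namespace"
  namespace = "gcp" # assumed value

  providers = {
    kubernetes = kubernetes.gcp_kubernetes
  }
}

Two points worth double-checking against the rest of the root module: the gcp_helm and gcp_kubectl blocks reference data.google_container_cluster.cluster, which is presumably declared elsewhere, and, unlike the aws_kubectl block, the gcp_kubectl configuration nests its connection settings inside a kubernetes {} block, so it is worth confirming the kubectl provider accepts that form.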