From 40b867748683b4a815bb26b093e9d107051c4416 Mon Sep 17 00:00:00 2001
From: Darkfella91
Date: Sun, 17 Nov 2024 02:36:28 +0200
Subject: [PATCH] Initial commit

---
 .editorconfig | 23 + .envrc | 8 + .gitattributes | 2 + .github/labeler.yaml | 22 + .github/labels.yaml | 38 + .github/renovate.json5 | 45 + .github/renovate/allowedVersions.json5 | 10 + .github/renovate/autoMerge.json5 | 21 + .github/renovate/clusters.json5 | 10 + .github/renovate/commitMessage.json5 | 16 + .github/renovate/customManagers.json5 | 35 + .github/renovate/grafanaDashboards.json5 | 34 + .github/renovate/groups.json5 | 66 + .github/renovate/labels.json5 | 37 + .github/renovate/packageRules.json5 | 17 + .github/renovate/semanticCommits.json5 | 105 ++ .github/workflows/flux-diff.yaml | 125 ++ .github/workflows/flux-hr-sync.yaml | 98 ++ .github/workflows/flux-image-test.yaml | 152 ++ .github/workflows/label-sync.yaml | 31 + .github/workflows/labeler.yaml | 21 + .github/workflows/release.yaml | 52 + .github/workflows/renovate.yaml | 63 + .gitignore | 11 + .sops.yaml | 26 + .taskfiles/VolSync/Taskfile.yaml | 221 +++ .taskfiles/VolSync/templates/list.tmpl.yaml | 20 + .../replicationdestination.tmpl.yaml | 29 + .taskfiles/VolSync/templates/unlock.tmpl.yaml | 27 + .taskfiles/VolSync/templates/wipe.tmpl.yaml | 26 + .taskfiles/bootstrap/Taskfile.yaml | 91 ++ .vscode/extensions.json | 14 + .vscode/settings.json | 57 + LICENSE | 21 + README.md | 1 + Taskfile.yaml | 20 + .../app/externalsecret.yaml | 24 + .../app/helmrelease.yaml | 28 + .../app/kustomization.yaml | 7 + .../gha-runner-scale-set-controller/ks.yaml | 22 + .../gha-runner-scale-set/app/helmrelease.yaml | 57 + .../app/kustomization.yaml | 7 + .../gha-runner-scale-set/ks.yaml | 20 + .../actions-runner-system/kustomization.yaml | 10 + .../apps/actions-runner-system/namespace.yaml | 38 + .../cert-manager/app/helm-values.yaml | 7 + .../cert-manager/app/helmrelease.yaml | 27 + .../cert-manager/app/kustomization.yaml | 14 + .../cert-manager/app/kustomizeconfig.yaml | 7 + .../cert-manager/app/prometheusrule.yaml | 68 + .../cert-manager/cert-manager/app/rbac.yaml | 24 + .../cert-manager/issuers/externalsecret.yaml | 23 + .../cert-manager/issuers/issuers.yaml | 36 + .../cert-manager/issuers/kustomization.yaml | 7 + .../apps/cert-manager/cert-manager/ks.yaml | 43 + .../certificates/app/certificates.yaml | 29 + .../certificates/app/kustomization.yaml | 7 + .../certificates/app/pushsecret.yaml | 59 + .../import/clusterexternalsecret.yaml | 43 + .../certificates/import/kustomization.yaml | 6 + .../apps/cert-manager/certificates/ks.yaml | 46 + .../main/apps/cert-manager/kustomization.yaml | 10 + .../main/apps/cert-manager/namespace.yaml | 38 + .../cloudnative-pg/app/externalsecret.yaml | 34 + .../cloudnative-pg/app/helmrelease.yaml | 31 + .../cloudnative-pg/app/kustomization.yaml | 7 + .../cloudnative-pg/cluster/cluster17.yaml | 75 + .../cloudnative-pg/cluster/gatus.yaml | 21 + .../cloudnative-pg/cluster/kustomization.yaml | 9 + .../cluster/prometheusrule.yaml | 67 + .../cluster/scheduledbackup.yaml | 12 + .../main/apps/database/cloudnative-pg/ks.yaml | 44 + .../main/apps/database/kustomization.yaml | 10 + kubernetes/main/apps/database/namespace.yaml | 38 + .../database/redis/app/externalsecret.yaml | 19 + .../apps/database/redis/app/helmrelease.yaml | 37 + .../database/redis/app/kustomization.yaml | 7 + kubernetes/main/apps/database/redis/ks.yaml | 22 + .../external-secrets/app/helmrelease.yaml | 43 + .../external-secrets/app/kustomization.yaml | 6 +
.../external-secrets/external-secrets/ks.yaml | 42 + .../stores/clustersecretstore.yaml | 19 + .../stores/kustomization.yaml | 7 + .../apps/external-secrets/kustomization.yaml | 9 + .../main/apps/external-secrets/namespace.yaml | 37 + .../flux-system/addons/app/kustomization.yaml | 8 + .../addons/app/monitoring/kustomization.yaml | 8 + .../addons/app/monitoring/podmonitor.yaml | 30 + .../addons/app/monitoring/prometheusrule.yaml | 32 + .../notifications/github/externalsecret.yaml | 19 + .../notifications/github/kustomization.yaml | 7 + .../notifications/github/notification.yaml | 24 + .../app/notifications/kustomization.yaml | 6 + .../app/webhooks/github/externalsecret.yaml | 19 + .../addons/app/webhooks/github/ingress.yaml | 20 + .../app/webhooks/github/kustomization.yaml | 8 + .../addons/app/webhooks/github/receiver.yaml | 26 + .../addons/app/webhooks/kustomization.yaml | 6 + .../main/apps/flux-system/addons/ks.yaml | 22 + .../main/apps/flux-system/kustomization.yaml | 9 + .../main/apps/flux-system/namespace.yaml | 46 + .../apps/keycloak/crds/kustomization.yaml | 6 + .../main/apps/keycloak/deployment/cr.yaml | 63 + .../keycloak/deployment/externalsecret.yaml | 74 + .../apps/keycloak/deployment/ingress.yaml | 31 + .../keycloak/deployment/kustomization.yaml | 8 + kubernetes/main/apps/keycloak/ks.yaml | 66 + .../main/apps/keycloak/kustomization.yaml | 8 + kubernetes/main/apps/keycloak/namespace.yaml | 5 + .../apps/keycloak/operator/kustomization.yaml | 5 + .../kube-system/cilium/app/helm-values.yaml | 55 + .../kube-system/cilium/app/helmrelease.yaml | 78 + .../kube-system/cilium/app/kustomization.yaml | 12 + .../cilium/app/kustomizeconfig.yaml | 7 + .../cilium/config/kustomization.yaml | 7 + .../apps/kube-system/cilium/config/l2.yaml | 24 + .../apps/kube-system/cilium/config/lrp.yaml | 22 + .../main/apps/kube-system/cilium/ks.yaml | 42 + .../kube-system/coredns/app/helm-values.yaml | 53 + .../kube-system/coredns/app/helmrelease.yaml | 27 + .../coredns/app/kustomization.yaml | 12 + .../coredns/app/kustomizeconfig.yaml | 7 + .../main/apps/kube-system/coredns/ks.yaml | 20 + .../descheduler/app/helmrelease.yaml | 64 + .../descheduler/app/kustomization.yaml | 6 + .../main/apps/kube-system/descheduler/ks.yaml | 20 + .../kube-system/generic-device-plugin.yaml | 60 + .../kubelet-csr-approver/app/helm-values.yaml | 4 + .../kubelet-csr-approver/app/helmrelease.yaml | 32 + .../app/kustomization.yaml | 11 + .../app/kustomizeconfig.yaml | 7 + .../kube-system/kubelet-csr-approver/ks.yaml | 19 + .../main/apps/kube-system/kustomization.yaml | 18 + .../lvm-disk-watcher/app/helm-values.yaml | 85 + .../lvm-disk-watcher/app/helmrelease.yaml | 27 + .../lvm-disk-watcher/app/kustomization.yaml | 11 + .../apps/kube-system/lvm-disk-watcher/ks.yaml | 19 + .../metrics-server/app/helmrelease.yaml | 33 + .../metrics-server/app/kustomization.yaml | 6 + .../apps/kube-system/metrics-server/ks.yaml | 20 + .../main/apps/kube-system/namespace.yaml | 38 + .../nvidia-device-plugin/app/helmrelease.yaml | 35 + .../app/kustomization.yaml | 5 + .../config/kustomization.yaml | 5 + .../nvidia-device-plugin/config/runtime.yaml | 6 + .../kube-system/nvidia-device-plugin/ks.yaml | 40 + .../reflector/app/helmrelease.yaml | 18 + .../reflector/app/kustomization.yaml | 6 + .../main/apps/kube-system/reflector/ks.yaml | 20 + .../kube-system/reloader/app/helmrelease.yaml | 33 + .../reloader/app/kustomization.yaml | 6 + .../main/apps/kube-system/reloader/ks.yaml | 20 + .../kube-system/topolvm/app/helm-values.yaml | 24 + 
.../kube-system/topolvm/app/helmrelease.yaml | 27 + .../topolvm/app/kustomization.yaml | 12 + .../topolvm/app/kustomizeconfig.yaml | 7 + .../main/apps/kube-system/topolvm/ks.yaml | 20 + .../main/apps/kyverno/kustomization.yaml | 9 + .../apps/kyverno/kyverno/app/helmrelease.yaml | 80 + .../kyverno/kyverno/app/kustomization.yaml | 6 + kubernetes/main/apps/kyverno/kyverno/ks.yaml | 42 + .../kyverno/kyverno/policies/dns-config.yaml | 30 + .../kyverno/policies/hostpath-readonly.yaml | 52 + .../kyverno/policies/kustomization.yaml | 7 + kubernetes/main/apps/kyverno/namespace.yaml | 38 + .../media/autobrr/app/externalsecret.yaml | 32 + .../apps/media/autobrr/app/gatus-config.yaml | 24 + .../apps/media/autobrr/app/helmrelease.yaml | 109 ++ .../apps/media/autobrr/app/kustomization.yaml | 17 + .../media/autobrr/app/resources/lokirule.yaml | 14 + kubernetes/main/apps/media/autobrr/ks.yaml | 27 + .../apps/media/bazarr/app/externalsecret.yaml | 358 ++++ .../apps/media/bazarr/app/helmrelease.yaml | 193 +++ .../apps/media/bazarr/app/kustomization.yaml | 18 + .../bazarr/app/resources/connectionpool.py | 1182 ++++++++++++++ .../media/bazarr/app/resources/subcleaner.sh | 18 + kubernetes/main/apps/media/bazarr/ks.yaml | 22 + .../media/flaresolverr/app/helmrelease.yaml | 83 + .../media/flaresolverr/app/kustomization.yaml | 6 + .../main/apps/media/flaresolverr/ks.yaml | 23 + .../media/jellyseerr/app/helmrelease.yaml | 113 ++ .../media/jellyseerr/app/kustomization.yaml | 8 + .../apps/media/jellyseerr/app/volsync.yaml | 47 + kubernetes/main/apps/media/jellyseerr/ks.yaml | 25 + kubernetes/main/apps/media/kustomization.yaml | 21 + kubernetes/main/apps/media/namespace.yaml | 38 + .../media/notifiarr/app/externalsecret.yaml | 33 + .../apps/media/notifiarr/app/helmrelease.yaml | 103 ++ kubernetes/main/apps/media/notifiarr/ks.yaml | 25 + .../media/omegabrr/app/externalsecret.yaml | 40 + .../apps/media/omegabrr/app/helmrelease.yaml | 67 + .../media/omegabrr/app/kustomization.yaml | 8 + kubernetes/main/apps/media/omegabrr/ks.yaml | 25 + .../main/apps/media/plex/app/helmrelease.yaml | 150 ++ .../apps/media/plex/app/kustomization.yaml | 16 + .../media/plex/app/resources/gatus-ep.yaml | 20 + .../media/plex/app/resources/lokirule.yaml | 14 + kubernetes/main/apps/media/plex/ks.yaml | 26 + .../media/prowlarr/app/externalsecret.yaml | 31 + .../apps/media/prowlarr/app/gatus-config.yaml | 24 + .../apps/media/prowlarr/app/helmrelease.yaml | 134 ++ .../media/prowlarr/app/kustomization.yaml | 9 + kubernetes/main/apps/media/prowlarr/ks.yaml | 27 + .../media/qbittorrent/app/externalsecret.yaml | 89 + .../media/qbittorrent/app/helmrelease.yaml | 385 +++++ .../media/qbittorrent/app/kustomization.yaml | 28 + .../qbittorrent/app/resources/dnsdist.conf | 37 + .../qbittorrent/app/resources/gatus-ep.yaml | 23 + .../qbittorrent/app/resources/healthcheck.sh | 10 + .../qbittorrent/app/resources/lokirule.yaml | 14 + .../qbittorrent/app/resources/post-process.sh | 118 ++ .../app/resources/qbitmanage-config.yaml | 304 ++++ .../apps/media/qbittorrent/app/volsync.yaml | 47 + .../main/apps/media/qbittorrent/ks.yaml | 22 + .../apps/media/radarr/app/externalsecret.yaml | 35 + .../apps/media/radarr/app/gatus-config.yaml | 24 + .../apps/media/radarr/app/helmrelease.yaml | 161 ++ .../apps/media/radarr/app/kustomization.yaml | 16 + .../radarr/app/resources/pushover-notify.sh | 85 + kubernetes/main/apps/media/radarr/ks.yaml | 27 + .../media/sabnzbd/app/externalsecret.yaml | 25 + .../apps/media/sabnzbd/app/gatus-config.yaml | 24 + 
.../apps/media/sabnzbd/app/helmrelease.yaml | 152 ++ .../apps/media/sabnzbd/app/kustomization.yaml | 17 + .../sabnzbd/app/resources/post-process.sh | 118 ++ .../main/apps/media/sabnzbd/app/volsync.yaml | 47 + kubernetes/main/apps/media/sabnzbd/ks.yaml | 26 + .../apps/media/sonarr/app/externalsecret.yaml | 35 + .../apps/media/sonarr/app/gatus-config.yaml | 24 + .../apps/media/sonarr/app/helmrelease.yaml | 155 ++ .../apps/media/sonarr/app/kustomization.yaml | 17 + .../sonarr/app/resources/pushover-notify.sh | 85 + .../sonarr/app/resources/refresh-series.sh | 21 + kubernetes/main/apps/media/sonarr/ks.yaml | 28 + .../media/unpackerr/app/externalsecret.yaml | 20 + .../apps/media/unpackerr/app/helmrelease.yaml | 103 ++ .../media/unpackerr/app/kustomization.yaml | 7 + kubernetes/main/apps/media/unpackerr/ks.yaml | 22 + .../network/cloudflared/app/dnsendpoint.yaml | 11 + .../cloudflared/app/externalsecret.yaml | 24 + .../network/cloudflared/app/helmrelease.yaml | 117 ++ .../cloudflared/app/kustomization.yaml | 14 + .../cloudflared/app/resources/config.yaml | 10 + .../main/apps/network/cloudflared/ks.yaml | 23 + .../network/crowdsec/app/externalsecret.yaml | 105 ++ .../network/crowdsec/app/helmrelease.yaml | 112 ++ .../network/crowdsec/app/kustomization.yaml | 7 + kubernetes/main/apps/network/crowdsec/ks.yaml | 23 + .../network/external-dns/RFC3645/config.yaml | 32 + .../external-dns/RFC3645/helmrelease.yaml | 57 + .../external-dns/RFC3645/kustomization.yaml | 7 + .../cloudflare/externalsecret.yaml | 20 + .../external-dns/cloudflare/helmrelease.yaml | 58 + .../cloudflare/kustomization.yaml | 7 + .../main/apps/network/external-dns/ks.yaml | 44 + .../main/apps/network/kustomization.yaml | 12 + kubernetes/main/apps/network/namespace.yaml | 38 + .../nginx/external/externalsecret.yaml | 67 + .../network/nginx/external/helmrelease.yaml | 179 ++ .../network/nginx/external/kustomization.yaml | 7 + .../network/nginx/internal/helmrelease.yaml | 92 ++ .../network/nginx/internal/kustomization.yaml | 6 + kubernetes/main/apps/network/nginx/ks.yaml | 62 + .../owasp-crs-storage/kustomization.yaml | 16 + .../network/nginx/owasp-crs-storage/pv.yaml | 13 + .../network/nginx/owasp-crs-storage/pvc.yaml | 12 + .../resources/modsecurity.conf | 287 ++++ .../owasp-crs-storage/resources/nginx.yaml | 1439 +++++++++++++++++ .../apps/oauth2-proxy/app/externalsecret.yaml | 25 + .../apps/oauth2-proxy/app/helmrelease.yaml | 447 +++++ .../oauth2-proxy/app/ingress-external.yaml | 20 + .../oauth2-proxy/app/ingress-internal.yaml | 70 + .../apps/oauth2-proxy/app/kustomization.yaml | 9 + kubernetes/main/apps/oauth2-proxy/ks.yaml | 20 + .../main/apps/oauth2-proxy/kustomization.yaml | 8 + .../main/apps/oauth2-proxy/namespace.yaml | 5 + .../gatus/app/externalsecret.yaml | 32 + .../observability/gatus/app/helmrelease.yaml | 138 ++ .../gatus/app/kustomization.yaml | 14 + .../apps/observability/gatus/app/rbac.yaml | 22 + .../gatus/app/resources/config.yaml | 56 + .../main/apps/observability/gatus/ks.yaml | 23 + .../grafana/app/externalsecret.yaml | 40 + .../grafana/app/helmrelease.yaml | 252 +++ .../grafana/app/kustomization.yaml | 7 + .../main/apps/observability/grafana/ks.yaml | 22 + .../app/externalsecret.yaml | 90 ++ .../app/helmrelease.yaml | 135 ++ .../app/kustomization.yaml | 8 + .../app/prometheusrule.yaml | 25 + .../kube-prometheus-stack/ks.yaml | 22 + .../apps/observability/kustomization.yaml | 14 + .../observability/loki/app/helmrelease.yaml | 85 + .../observability/loki/app/kustomization.yaml | 6 + 
.../main/apps/observability/loki/ks.yaml | 20 + .../main/apps/observability/namespace.yaml | 37 + .../app/helmrelease.yaml | 23 + .../app/kustomization.yaml | 6 + .../prometheus-operator-crds/ks.yaml | 20 + .../promtail/app/helmrelease.yaml | 30 + .../promtail/app/kustomization.yaml | 6 + .../main/apps/observability/promtail/ks.yaml | 20 + .../apps/system-upgrade/kustomization.yaml | 9 + .../main/apps/system-upgrade/namespace.yaml | 38 + .../app/helmrelease.yaml | 101 ++ .../app/kustomization.yaml | 7 + .../system-upgrade-controller/app/rbac.yaml | 21 + .../system-upgrade-controller/ks.yaml | 49 + .../plans/kubernetes.yaml | 45 + .../plans/kustomization.yaml | 7 + .../plans/talos.yaml | 48 + kubernetes/main/apps/vault/kustomization.yaml | 9 + kubernetes/main/apps/vault/namespace.yaml | 5 + .../apps/vault/vault/app/helmrelease.yaml | 452 ++++++ .../apps/vault/vault/app/kustomization.yaml | 6 + kubernetes/main/apps/vault/vault/ks.yaml | 23 + .../main/apps/vaultwarden/kustomization.yaml | 9 + .../main/apps/vaultwarden/namespace.yaml | 35 + .../vaultwarden/app/externalsecret.yaml | 33 + .../vaultwarden/app/helmrelease.yaml | 153 ++ .../vaultwarden/app/kustomization.yaml | 7 + .../vaultwarden/app/resources/gatus-ep.yaml | 20 + .../main/apps/vaultwarden/vaultwarden/ks.yaml | 27 + .../apps/volsync-system/kustomization.yaml | 10 + .../main/apps/volsync-system/namespace.yaml | 38 + .../snapshot-controller/app/helmrelease.yaml | 32 + .../app/kustomization.yaml | 6 + .../config/kustomization.yaml | 6 + .../config/snapshotclass.yaml | 9 + .../snapshot-controller/ks.yaml | 42 + .../volsync/app/helmrelease.yaml | 31 + .../volsync/app/kustomization.yaml | 7 + .../volsync/app/prometheusrule.yaml | 28 + .../main/apps/volsync-system/volsync/ks.yaml | 22 + .../bootstrap/flux/age-key.secret.sops.yaml | 28 + .../flux/deploy-key.secret.sops.yaml | 30 + .../main/bootstrap/flux/kustomization.yaml | 62 + kubernetes/main/bootstrap/helmfile.yaml | 90 ++ .../talos/assets/k8s-0.secret.sops.yaml | 190 +++ .../main/bootstrap/talos/patches/all.yaml | 69 + .../bootstrap/talos/patches/controlplane.yaml | 39 + .../bootstrap/talos/patches/disable_cni.yaml | 6 + .../talos/patches/disk_encryption.yaml | 12 + .../bootstrap/talos/patches/dm_modules.yaml | 6 + .../main/bootstrap/talos/patches/nvidia.yaml | 10 + .../main/bootstrap/talos/talconfig.yaml | 60 + .../main/bootstrap/talos/talsecret.sops.yaml | 43 + kubernetes/main/flux/apps.yaml | 41 + kubernetes/main/flux/config/cluster.yaml | 43 + kubernetes/main/flux/config/flux.yaml | 177 ++ .../main/flux/config/kustomization.yaml | 7 + .../helm/actions-runner-controller.yaml | 11 + .../flux/repositories/helm/authentik.yaml | 10 + .../main/flux/repositories/helm/backube.yaml | 10 + .../main/flux/repositories/helm/bitnami.yaml | 11 + .../main/flux/repositories/helm/bjw-s.yaml | 11 + .../main/flux/repositories/helm/cilium.yaml | 10 + .../repositories/helm/cloudnative-pg.yaml | 10 + .../main/flux/repositories/helm/coredns.yaml | 10 + .../main/flux/repositories/helm/crowdsec.yaml | 10 + .../repositories/helm/csi-driver-nfs.yaml | 10 + .../flux/repositories/helm/descheduler.yaml | 10 + .../flux/repositories/helm/emberstack.yaml | 10 + .../flux/repositories/helm/external-dns.yaml | 10 + .../repositories/helm/external-secrets.yaml | 10 + .../main/flux/repositories/helm/grafana.yaml | 10 + .../flux/repositories/helm/ingress-nginx.yaml | 10 + .../main/flux/repositories/helm/jetstack.yaml | 10 + .../flux/repositories/helm/kustomization.yaml | 35 + .../main/flux/repositories/helm/kyverno.yaml 
| 11 + .../repositories/helm/metrics-server.yaml | 10 + .../helm/node-feature-discovery.yaml | 10 + .../helm/nvidia-device-plugin.yaml | 10 + .../flux/repositories/helm/oauth2-proxy.yaml | 10 + .../main/flux/repositories/helm/piraeus.yaml | 10 + .../flux/repositories/helm/postfinance.yaml | 10 + .../helm/prometheus-community.yaml | 11 + .../main/flux/repositories/helm/stakater.yaml | 11 + .../main/flux/repositories/helm/topolvm.yaml | 10 + .../flux/repositories/helm/truecharts.yaml | 11 + .../main/flux/repositories/helm/vault.yaml | 10 + .../flux/repositories/helm/vaultwarden.yaml | 10 + .../main/flux/repositories/kustomization.yaml | 8 + .../vars/cluster-secrets.secret.sops.yaml | 39 + .../templates/gatus/external/configmap.yaml | 20 + .../gatus/external/kustomization.yaml | 6 + .../templates/gatus/guarded/configmap.yaml | 24 + .../gatus/guarded/kustomization.yaml | 6 + 388 files changed, 17680 insertions(+) create mode 100644 .editorconfig create mode 100644 .envrc create mode 100644 .gitattributes create mode 100644 .github/labeler.yaml create mode 100644 .github/labels.yaml create mode 100644 .github/renovate.json5 create mode 100644 .github/renovate/allowedVersions.json5 create mode 100644 .github/renovate/autoMerge.json5 create mode 100644 .github/renovate/clusters.json5 create mode 100644 .github/renovate/commitMessage.json5 create mode 100644 .github/renovate/customManagers.json5 create mode 100644 .github/renovate/grafanaDashboards.json5 create mode 100644 .github/renovate/groups.json5 create mode 100644 .github/renovate/labels.json5 create mode 100644 .github/renovate/packageRules.json5 create mode 100644 .github/renovate/semanticCommits.json5 create mode 100644 .github/workflows/flux-diff.yaml create mode 100644 .github/workflows/flux-hr-sync.yaml create mode 100644 .github/workflows/flux-image-test.yaml create mode 100644 .github/workflows/label-sync.yaml create mode 100644 .github/workflows/labeler.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 .github/workflows/renovate.yaml create mode 100644 .gitignore create mode 100644 .sops.yaml create mode 100644 .taskfiles/VolSync/Taskfile.yaml create mode 100644 .taskfiles/VolSync/templates/list.tmpl.yaml create mode 100644 .taskfiles/VolSync/templates/replicationdestination.tmpl.yaml create mode 100644 .taskfiles/VolSync/templates/unlock.tmpl.yaml create mode 100644 .taskfiles/VolSync/templates/wipe.tmpl.yaml create mode 100644 .taskfiles/bootstrap/Taskfile.yaml create mode 100644 .vscode/extensions.json create mode 100644 .vscode/settings.json create mode 100644 LICENSE create mode 100644 README.md create mode 100644 Taskfile.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml create mode 100644 kubernetes/main/apps/actions-runner-system/kustomization.yaml create mode 100644 
kubernetes/main/apps/actions-runner-system/namespace.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/app/rbac.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml create mode 100644 kubernetes/main/apps/cert-manager/cert-manager/ks.yaml create mode 100644 kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml create mode 100644 kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml create mode 100644 kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml create mode 100644 kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml create mode 100644 kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml create mode 100644 kubernetes/main/apps/cert-manager/certificates/ks.yaml create mode 100644 kubernetes/main/apps/cert-manager/kustomization.yaml create mode 100644 kubernetes/main/apps/cert-manager/namespace.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml create mode 100644 kubernetes/main/apps/database/cloudnative-pg/ks.yaml create mode 100644 kubernetes/main/apps/database/kustomization.yaml create mode 100644 kubernetes/main/apps/database/namespace.yaml create mode 100644 kubernetes/main/apps/database/redis/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/database/redis/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/database/redis/app/kustomization.yaml create mode 100644 kubernetes/main/apps/database/redis/ks.yaml create mode 100644 kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml create mode 100644 kubernetes/main/apps/external-secrets/external-secrets/ks.yaml create mode 100644 kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml create mode 100644 kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml create mode 100644 kubernetes/main/apps/external-secrets/kustomization.yaml create mode 100644 kubernetes/main/apps/external-secrets/namespace.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/kustomization.yaml create mode 100644 
kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml create mode 100644 kubernetes/main/apps/flux-system/addons/ks.yaml create mode 100644 kubernetes/main/apps/flux-system/kustomization.yaml create mode 100644 kubernetes/main/apps/flux-system/namespace.yaml create mode 100644 kubernetes/main/apps/keycloak/crds/kustomization.yaml create mode 100644 kubernetes/main/apps/keycloak/deployment/cr.yaml create mode 100644 kubernetes/main/apps/keycloak/deployment/externalsecret.yaml create mode 100644 kubernetes/main/apps/keycloak/deployment/ingress.yaml create mode 100644 kubernetes/main/apps/keycloak/deployment/kustomization.yaml create mode 100644 kubernetes/main/apps/keycloak/ks.yaml create mode 100644 kubernetes/main/apps/keycloak/kustomization.yaml create mode 100644 kubernetes/main/apps/keycloak/namespace.yaml create mode 100644 kubernetes/main/apps/keycloak/operator/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/config/l2.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/config/lrp.yaml create mode 100644 kubernetes/main/apps/kube-system/cilium/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml create mode 100644 kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml create mode 100644 kubernetes/main/apps/kube-system/coredns/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/descheduler/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/descheduler/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/descheduler/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/generic-device-plugin.yaml create mode 100644 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml create mode 100644 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml create mode 100644 
kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml create mode 100644 kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helm-values.yaml create mode 100644 kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/lvm-disk-watcher/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/lvm-disk-watcher/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/metrics-server/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/namespace.yaml create mode 100644 kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml create mode 100644 kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/reflector/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/reflector/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/reflector/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/reloader/ks.yaml create mode 100644 kubernetes/main/apps/kube-system/topolvm/app/helm-values.yaml create mode 100644 kubernetes/main/apps/kube-system/topolvm/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kube-system/topolvm/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kube-system/topolvm/app/kustomizeconfig.yaml create mode 100644 kubernetes/main/apps/kube-system/topolvm/ks.yaml create mode 100644 kubernetes/main/apps/kyverno/kustomization.yaml create mode 100644 kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml create mode 100644 kubernetes/main/apps/kyverno/kyverno/ks.yaml create mode 100644 kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml create mode 100644 kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml create mode 100644 kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml create mode 100644 kubernetes/main/apps/kyverno/namespace.yaml create mode 100644 kubernetes/main/apps/media/autobrr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/autobrr/app/gatus-config.yaml create mode 100644 kubernetes/main/apps/media/autobrr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/autobrr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml create mode 100644 kubernetes/main/apps/media/autobrr/ks.yaml create mode 100644 kubernetes/main/apps/media/bazarr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/bazarr/app/helmrelease.yaml create mode 100644 
kubernetes/main/apps/media/bazarr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py create mode 100644 kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh create mode 100644 kubernetes/main/apps/media/bazarr/ks.yaml create mode 100644 kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/flaresolverr/ks.yaml create mode 100644 kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/jellyseerr/app/volsync.yaml create mode 100644 kubernetes/main/apps/media/jellyseerr/ks.yaml create mode 100644 kubernetes/main/apps/media/kustomization.yaml create mode 100644 kubernetes/main/apps/media/namespace.yaml create mode 100644 kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/notifiarr/ks.yaml create mode 100644 kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/omegabrr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/omegabrr/ks.yaml create mode 100644 kubernetes/main/apps/media/plex/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/plex/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml create mode 100644 kubernetes/main/apps/media/plex/app/resources/lokirule.yaml create mode 100644 kubernetes/main/apps/media/plex/ks.yaml create mode 100644 kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/prowlarr/app/gatus-config.yaml create mode 100644 kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/prowlarr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/prowlarr/ks.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/resources/dnsdist.conf create mode 100644 kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh create mode 100644 kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh create mode 100644 kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/app/volsync.yaml create mode 100644 kubernetes/main/apps/media/qbittorrent/ks.yaml create mode 100644 kubernetes/main/apps/media/radarr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/radarr/app/gatus-config.yaml create mode 100644 kubernetes/main/apps/media/radarr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/radarr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh create mode 100644 kubernetes/main/apps/media/radarr/ks.yaml create 
mode 100644 kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/sabnzbd/app/gatus-config.yaml create mode 100644 kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh create mode 100644 kubernetes/main/apps/media/sabnzbd/app/volsync.yaml create mode 100644 kubernetes/main/apps/media/sabnzbd/ks.yaml create mode 100644 kubernetes/main/apps/media/sonarr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/sonarr/app/gatus-config.yaml create mode 100644 kubernetes/main/apps/media/sonarr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/sonarr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh create mode 100644 kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh create mode 100644 kubernetes/main/apps/media/sonarr/ks.yaml create mode 100644 kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/media/unpackerr/app/kustomization.yaml create mode 100644 kubernetes/main/apps/media/unpackerr/ks.yaml create mode 100644 kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml create mode 100644 kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/network/cloudflared/app/kustomization.yaml create mode 100644 kubernetes/main/apps/network/cloudflared/app/resources/config.yaml create mode 100644 kubernetes/main/apps/network/cloudflared/ks.yaml create mode 100644 kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/network/crowdsec/app/kustomization.yaml create mode 100644 kubernetes/main/apps/network/crowdsec/ks.yaml create mode 100644 kubernetes/main/apps/network/external-dns/RFC3645/config.yaml create mode 100644 kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml create mode 100644 kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml create mode 100644 kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml create mode 100644 kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml create mode 100644 kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml create mode 100644 kubernetes/main/apps/network/external-dns/ks.yaml create mode 100644 kubernetes/main/apps/network/kustomization.yaml create mode 100644 kubernetes/main/apps/network/namespace.yaml create mode 100644 kubernetes/main/apps/network/nginx/external/externalsecret.yaml create mode 100644 kubernetes/main/apps/network/nginx/external/helmrelease.yaml create mode 100644 kubernetes/main/apps/network/nginx/external/kustomization.yaml create mode 100644 kubernetes/main/apps/network/nginx/internal/helmrelease.yaml create mode 100644 kubernetes/main/apps/network/nginx/internal/kustomization.yaml create mode 100644 kubernetes/main/apps/network/nginx/ks.yaml create mode 100644 kubernetes/main/apps/network/nginx/owasp-crs-storage/kustomization.yaml create mode 100644 kubernetes/main/apps/network/nginx/owasp-crs-storage/pv.yaml create mode 100644 
kubernetes/main/apps/network/nginx/owasp-crs-storage/pvc.yaml create mode 100644 kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/modsecurity.conf create mode 100644 kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/nginx.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/ks.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/kustomization.yaml create mode 100644 kubernetes/main/apps/oauth2-proxy/namespace.yaml create mode 100644 kubernetes/main/apps/observability/gatus/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/observability/gatus/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/observability/gatus/app/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/gatus/app/rbac.yaml create mode 100644 kubernetes/main/apps/observability/gatus/app/resources/config.yaml create mode 100644 kubernetes/main/apps/observability/gatus/ks.yaml create mode 100644 kubernetes/main/apps/observability/grafana/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/observability/grafana/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/observability/grafana/app/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/grafana/ks.yaml create mode 100644 kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml create mode 100644 kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml create mode 100644 kubernetes/main/apps/observability/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/loki/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/observability/loki/app/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/loki/ks.yaml create mode 100644 kubernetes/main/apps/observability/namespace.yaml create mode 100644 kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml create mode 100644 kubernetes/main/apps/observability/promtail/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/observability/promtail/app/kustomization.yaml create mode 100644 kubernetes/main/apps/observability/promtail/ks.yaml create mode 100644 kubernetes/main/apps/system-upgrade/kustomization.yaml create mode 100644 kubernetes/main/apps/system-upgrade/namespace.yaml create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml 
create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml create mode 100644 kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml create mode 100644 kubernetes/main/apps/vault/kustomization.yaml create mode 100644 kubernetes/main/apps/vault/namespace.yaml create mode 100644 kubernetes/main/apps/vault/vault/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/vault/vault/app/kustomization.yaml create mode 100644 kubernetes/main/apps/vault/vault/ks.yaml create mode 100644 kubernetes/main/apps/vaultwarden/kustomization.yaml create mode 100644 kubernetes/main/apps/vaultwarden/namespace.yaml create mode 100644 kubernetes/main/apps/vaultwarden/vaultwarden/app/externalsecret.yaml create mode 100644 kubernetes/main/apps/vaultwarden/vaultwarden/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/vaultwarden/vaultwarden/app/kustomization.yaml create mode 100644 kubernetes/main/apps/vaultwarden/vaultwarden/app/resources/gatus-ep.yaml create mode 100644 kubernetes/main/apps/vaultwarden/vaultwarden/ks.yaml create mode 100644 kubernetes/main/apps/volsync-system/kustomization.yaml create mode 100644 kubernetes/main/apps/volsync-system/namespace.yaml create mode 100644 kubernetes/main/apps/volsync-system/snapshot-controller/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/volsync-system/snapshot-controller/app/kustomization.yaml create mode 100644 kubernetes/main/apps/volsync-system/snapshot-controller/config/kustomization.yaml create mode 100644 kubernetes/main/apps/volsync-system/snapshot-controller/config/snapshotclass.yaml create mode 100644 kubernetes/main/apps/volsync-system/snapshot-controller/ks.yaml create mode 100644 kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml create mode 100644 kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml create mode 100644 kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml create mode 100644 kubernetes/main/apps/volsync-system/volsync/ks.yaml create mode 100644 kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml create mode 100644 kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml create mode 100644 kubernetes/main/bootstrap/flux/kustomization.yaml create mode 100644 kubernetes/main/bootstrap/helmfile.yaml create mode 100644 kubernetes/main/bootstrap/talos/assets/k8s-0.secret.sops.yaml create mode 100644 kubernetes/main/bootstrap/talos/patches/all.yaml create mode 100644 kubernetes/main/bootstrap/talos/patches/controlplane.yaml create mode 100644 kubernetes/main/bootstrap/talos/patches/disable_cni.yaml create mode 100644 kubernetes/main/bootstrap/talos/patches/disk_encryption.yaml create mode 100644 kubernetes/main/bootstrap/talos/patches/dm_modules.yaml create mode 100644 kubernetes/main/bootstrap/talos/patches/nvidia.yaml create mode 100644 kubernetes/main/bootstrap/talos/talconfig.yaml create mode 100644 kubernetes/main/bootstrap/talos/talsecret.sops.yaml create mode 100644 kubernetes/main/flux/apps.yaml create mode 100644 kubernetes/main/flux/config/cluster.yaml create mode 100644 kubernetes/main/flux/config/flux.yaml create mode 100644 kubernetes/main/flux/config/kustomization.yaml create mode 100644 kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml create mode 100644 kubernetes/main/flux/repositories/helm/authentik.yaml create mode 100644 
kubernetes/main/flux/repositories/helm/backube.yaml create mode 100644 kubernetes/main/flux/repositories/helm/bitnami.yaml create mode 100644 kubernetes/main/flux/repositories/helm/bjw-s.yaml create mode 100644 kubernetes/main/flux/repositories/helm/cilium.yaml create mode 100644 kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml create mode 100644 kubernetes/main/flux/repositories/helm/coredns.yaml create mode 100644 kubernetes/main/flux/repositories/helm/crowdsec.yaml create mode 100644 kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml create mode 100644 kubernetes/main/flux/repositories/helm/descheduler.yaml create mode 100644 kubernetes/main/flux/repositories/helm/emberstack.yaml create mode 100644 kubernetes/main/flux/repositories/helm/external-dns.yaml create mode 100644 kubernetes/main/flux/repositories/helm/external-secrets.yaml create mode 100644 kubernetes/main/flux/repositories/helm/grafana.yaml create mode 100644 kubernetes/main/flux/repositories/helm/ingress-nginx.yaml create mode 100644 kubernetes/main/flux/repositories/helm/jetstack.yaml create mode 100644 kubernetes/main/flux/repositories/helm/kustomization.yaml create mode 100644 kubernetes/main/flux/repositories/helm/kyverno.yaml create mode 100644 kubernetes/main/flux/repositories/helm/metrics-server.yaml create mode 100644 kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml create mode 100644 kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml create mode 100644 kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml create mode 100644 kubernetes/main/flux/repositories/helm/piraeus.yaml create mode 100644 kubernetes/main/flux/repositories/helm/postfinance.yaml create mode 100644 kubernetes/main/flux/repositories/helm/prometheus-community.yaml create mode 100644 kubernetes/main/flux/repositories/helm/stakater.yaml create mode 100644 kubernetes/main/flux/repositories/helm/topolvm.yaml create mode 100644 kubernetes/main/flux/repositories/helm/truecharts.yaml create mode 100644 kubernetes/main/flux/repositories/helm/vault.yaml create mode 100644 kubernetes/main/flux/repositories/helm/vaultwarden.yaml create mode 100644 kubernetes/main/flux/repositories/kustomization.yaml create mode 100644 kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml create mode 100644 kubernetes/main/templates/gatus/external/configmap.yaml create mode 100644 kubernetes/main/templates/gatus/external/kustomization.yaml create mode 100644 kubernetes/main/templates/gatus/guarded/configmap.yaml create mode 100644 kubernetes/main/templates/gatus/guarded/kustomization.yaml
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 000000000..6e40cb65c
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,23 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+indent_style = space
+indent_size = 2
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+[{Dockerfile,*.bash,*.sh}]
+indent_style = space
+indent_size = 4
diff --git a/.envrc b/.envrc
new file mode 100644
index 000000000..9291d771d
--- /dev/null
+++ b/.envrc
@@ -0,0 +1,8 @@
+#shellcheck disable=SC2148,SC2155
+# Kubernetes
+export KUBECONFIG="$(expand_path ./kubernetes/main/kubeconfig)"
+export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)"
+export TALOSCONFIG="$(expand_path ./kubernetes/main/talosconfig)"
+# Taskfile
+export TASK_X_ENV_PRECEDENCE=1
+export TASK_X_MAP_VARIABLES=0
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..3f5563f4c
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+* text=auto eol=lf
+*.sops.* diff=sopsdiffer
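For context (not part of the patch): the two dotfiles above assume companion tooling. `.envrc` relies on direnv (`expand_path` is a direnv stdlib helper) and only takes effect once the file is trusted, and the `sopsdiffer` diff driver named in `.gitattributes` produces readable diffs only after git is told how to decrypt. A minimal sketch, assuming direnv and sops are installed and the age key sits at `./age.key` as the `.envrc` expects:

```bash
# Trust .envrc so direnv exports KUBECONFIG, TALOSCONFIG and
# SOPS_AGE_KEY_FILE whenever you enter the repository.
direnv allow .

# Wire up the "sopsdiffer" driver declared in .gitattributes:
# decrypt *.sops.* files before diffing, so `git diff` shows
# plaintext changes instead of re-encrypted ciphertext noise.
git config diff.sopsdiffer.textconv "sops -d"
```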
diff --git a/.github/labeler.yaml b/.github/labeler.yaml
new file mode 100644
index 000000000..af6ba72e0
--- /dev/null
+++ b/.github/labeler.yaml
@@ -0,0 +1,22 @@
+---
+# Areas
+area/docs:
+  - changed-files:
+      - any-glob-to-any-file:
+          - "docs/**/*"
+          - "README.md"
+area/github:
+  - changed-files:
+      - any-glob-to-any-file: ".github/**/*"
+area/kubernetes:
+  - changed-files:
+      - any-glob-to-any-file: "kubernetes/**/*"
+area/taskfile:
+  - changed-files:
+      - any-glob-to-any-file:
+          - ".taskfiles/**/*"
+          - "Taskfile.yaml"
+# Clusters
+cluster/main:
+  - changed-files:
+      - any-glob-to-any-file: "kubernetes/main/**/*"
diff --git a/.github/labels.yaml b/.github/labels.yaml
new file mode 100644
index 000000000..86f42d1d9
--- /dev/null
+++ b/.github/labels.yaml
@@ -0,0 +1,38 @@
+---
+# Areas
+- name: area/docs
+  color: "0e8a16"
+- name: area/github
+  color: "0e8a16"
+- name: area/kubernetes
+  color: "0e8a16"
+- name: area/taskfile
+  color: "0e8a16"
+# Clusters
+- name: cluster/main
+  color: "ffc300"
+# Renovate Types
+- name: renovate/container
+  color: "027fa0"
+- name: renovate/github-action
+  color: "027fa0"
+- name: renovate/grafana-dashboard
+  color: "027fa0"
+- name: renovate/github-release
+  color: "027fa0"
+- name: renovate/helm
+  color: "027fa0"
+# Semantic Types
+- name: type/digest
+  color: "ffeC19"
+- name: type/patch
+  color: "ffeC19"
+- name: type/minor
+  color: "ff9800"
+- name: type/major
+  color: "f6412d"
+# Uncategorized
+- name: community
+  color: "370fb2"
+- name: hold
+  color: "ee0701"
diff --git a/.github/renovate.json5 b/.github/renovate.json5
new file mode 100644
index 000000000..7b2591d82
--- /dev/null
+++ b/.github/renovate.json5
@@ -0,0 +1,45 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:recommended",
+    "docker:enableMajor",
+    "replacements:k8s-registry-move",
+    ":automergeBranch",
+    ":disableRateLimiting",
+    ":dependencyDashboard",
+    ":semanticCommits",
+    ":skipStatusChecks",
+    ":timezone(Europe/Sofia)",
+    "github>Darkfella91/home-ops//.github/renovate/allowedVersions.json5",
+    "github>Darkfella91/home-ops//.github/renovate/autoMerge.json5",
+    "github>Darkfella91/home-ops//.github/renovate/clusters.json5",
+    "github>Darkfella91/home-ops//.github/renovate/commitMessage.json5",
+    "github>Darkfella91/home-ops//.github/renovate/customManagers.json5",
+    "github>Darkfella91/home-ops//.github/renovate/grafanaDashboards.json5",
+    "github>Darkfella91/home-ops//.github/renovate/groups.json5",
+    "github>Darkfella91/home-ops//.github/renovate/labels.json5",
+    "github>Darkfella91/home-ops//.github/renovate/packageRules.json5",
+    "github>Darkfella91/home-ops//.github/renovate/semanticCommits.json5"
+  ],
+  "dependencyDashboardTitle": "Renovate Dashboard 🤖",
+  "suppressNotifications": ["prEditedNotification", "prIgnoreNotification"],
+  "onboarding": false,
+  "requireConfig": "ignored",
+  "ignorePaths": ["**/*.sops.*", "**/.archive/**", "**/resources/**"],
+  "flux": {
+    "fileMatch": [
+      "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$"
+    ]
+  },
+  "helm-values": {
+    "fileMatch": [
+      "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$"
+    ]
+  },
+  "kubernetes": {
+    "fileMatch": [
+      "(^|/)\\.taskfiles/.+\\.ya?ml(?:\\.j2)?$",
+      "(^|/)kubernetes/.+\\.ya?ml(?:\\.j2)?$"
+    ]
+  }
+}
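For context (not part of the patch): `renovate.json5` stitches together the preset files that follow via `github>Darkfella91/home-ops//…` references, so a syntax error in any one preset breaks the whole configuration. Renovate ships a config linter that can be run locally before pushing; a sketch, assuming Node.js is available (`.github/renovate.json5` is one of the default locations the validator checks):

```bash
# Lint the Renovate configuration from the repository root.
npx --yes --package renovate -- renovate-config-validator
```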
diff --git a/.github/renovate/allowedVersions.json5 b/.github/renovate/allowedVersions.json5
new file mode 100644
index 000000000..f02bf47a5
--- /dev/null
+++ b/.github/renovate/allowedVersions.json5
@@ -0,0 +1,10 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "matchDatasources": ["docker"],
+      "matchPackagePatterns": ["postgresql"],
+      "allowedVersions": "<18"
+    }
+  ]
+}
diff --git a/.github/renovate/autoMerge.json5 b/.github/renovate/autoMerge.json5
new file mode 100644
index 000000000..73d3cdc17
--- /dev/null
+++ b/.github/renovate/autoMerge.json5
@@ -0,0 +1,21 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "description": ["Auto-merge container digest updates for trusted containers"],
+      "matchDatasources": ["docker"],
+      "automerge": true,
+      "automergeType": "branch",
+      "matchUpdateTypes": ["digest"],
+      "matchPackagePatterns": ["ghcr.io/bjw-s", "ghcr.io/onedr0p"]
+    },
+    {
+      "description": ["Auto-merge GitHub Actions for minor and patch"],
+      "matchManagers": ["github-actions"],
+      "matchDatasources": ["github-tags"],
+      "automerge": true,
+      "automergeType": "branch",
+      "matchUpdateTypes": ["minor", "patch"]
+    }
+  ]
+}
diff --git a/.github/renovate/clusters.json5 b/.github/renovate/clusters.json5
new file mode 100644
index 000000000..7ceb227b0
--- /dev/null
+++ b/.github/renovate/clusters.json5
@@ -0,0 +1,10 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "packageRules": [
+    {
+      "description": ["Separate PRs for main cluster"],
+      "matchFileNames": ["**/kubernetes/main/**"],
+      "additionalBranchPrefix": "main-"
+    }
+  ]
+}
diff --git a/.github/renovate/commitMessage.json5 b/.github/renovate/commitMessage.json5
new file mode 100644
index 000000000..3fea62872
--- /dev/null
+++ b/.github/renovate/commitMessage.json5
@@ -0,0 +1,16 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "commitMessageTopic": "{{depName}}",
+  "commitMessageExtra": "to {{newVersion}}",
+  "commitMessageSuffix": "",
+  "packageRules": [
+    {
+      "matchDatasources": ["helm"],
+      "commitMessageTopic": "chart {{depName}}"
+    },
+    {
+      "matchDatasources": ["docker"],
+      "commitMessageTopic": "image {{depName}}"
+    }
+  ]
+}
diff --git a/.github/renovate/customManagers.json5 b/.github/renovate/customManagers.json5
new file mode 100644
index 000000000..47da2e311
--- /dev/null
+++ b/.github/renovate/customManagers.json5
@@ -0,0 +1,35 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "customManagers": [
+    {
+      "customType": "regex",
+      "description": ["Process YAML custom dependencies"],
+      "fileMatch": [
+        "(^|/)kubernetes/.+\\.env$",
+        "(^|/)kubernetes/.+\\.ya?ml$"
+      ],
+      "matchStrings": [
+        // # renovate: datasource=github-releases depName=k3s-io/k3s
+        // k3s_release_version: &version v1.29.0+k3s1
+        // # renovate: datasource=helm depName=cilium repository=https://helm.cilium.io
+        // version: 1.15.1
+        // # renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
+        // KUBERNETES_VERSION=v1.31.1
+        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( repository=(?<registryUrl>\\S+))?\\n.+(:\\s|=)(&\\S+\\s)?(?<currentValue>\\S+)",
+        // # renovate: datasource=github-releases depName=rancher/system-upgrade-controller
+        // https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml
+        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)\\n.+/(?<currentValue>(v|\\d)[^/]+)",
+        "datasource=(?<datasource>\\S+) depName=(?<depName>\\S+)( repository=(?<registryUrl>\\S+))?\\n.+?\"(?<currentValue>\\S+)\""
+      ],
+      "datasourceTemplate": "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}"
+    },
+    {
+      "customType": "regex",
+      "description": ["Process CloudNativePG PostgreSQL version"],
+      "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml$"],
+      "matchStrings": ["imageName: (?<depName>\\S+):(?<currentValue>.*\\-.*)"],
+      "datasourceTemplate": "docker",
+      "versioningTemplate": "redhat"
+    }
+  ]
+}
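For context (not part of the patch): the first regex manager keys off `# renovate:` marker comments and rewrites only the version token on the line below. A worked example using the annotation pair quoted in the config's own comments:

```bash
# Input as it would appear in, e.g., a Talos env file:
cat <<'EOF'
# renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet
KUBERNETES_VERSION=v1.31.1
EOF
# The first matchString captures:
#   datasource   -> docker
#   depName      -> ghcr.io/siderolabs/kubelet
#   currentValue -> v1.31.1  (the token after '=' on the next line)
# Renovate then bumps only the currentValue token in place.
```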
"regex", + "description": ["Process CloudnativePG Postgresql version"], + "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml$"], + "matchStrings": ["imageName: (?\\S+):(?.*\\-.*)"], + "datasourceTemplate": "docker", + "versioningTemplate": "redhat" + } + ] +} diff --git a/.github/renovate/grafanaDashboards.json5 b/.github/renovate/grafanaDashboards.json5 new file mode 100644 index 000000000..580d288db --- /dev/null +++ b/.github/renovate/grafanaDashboards.json5 @@ -0,0 +1,34 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "customDatasources": { + "grafana-dashboards": { + "defaultRegistryUrlTemplate": "https://grafana.com/api/dashboards/{{packageName}}", + "format": "json", + "transformTemplates": ["{\"releases\":[{\"version\": $string(revision)}]}"] + } + }, + "customManagers": [ + { + "customType": "regex", + "description": ["Process Grafana dashboards"], + "fileMatch": ["(^|/)kubernetes/.+\\.ya?ml$"], + "matchStrings": ["depName=\"(?.*)\"\\n(?\\s+)gnetId: (?\\d+)\\n.+revision: (?\\d+)"], + "autoReplaceStringTemplate": "depName=\"{{{depName}}}\"\n{{{indentation}}}gnetId: {{{packageName}}}\n{{{indentation}}}revision: {{{newValue}}}", + "datasourceTemplate": "custom.grafana-dashboards", + "versioningTemplate": "regex:^(?\\d+)$" + } + ], + "packageRules": [ + { + "addLabels": ["renovate/grafana-dashboard"], + "automerge": true, + "automergeType": "branch", + "matchDatasources": ["custom.grafana-dashboards"], + "matchUpdateTypes": ["major"], + "semanticCommitType": "chore", + "semanticCommitScope": "grafana-dashboards", + "commitMessageTopic": "dashboard {{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + } + ] +} diff --git a/.github/renovate/groups.json5 b/.github/renovate/groups.json5 new file mode 100644 index 000000000..79a05f8e7 --- /dev/null +++ b/.github/renovate/groups.json5 @@ -0,0 +1,66 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "description": ["1Password Connect Group"], + "groupName": "1Password Connnect", + "matchPackagePatterns": ["1password/connect"], + "matchDatasources": ["docker"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Actions Runner Controller Group"], + "groupName": "Actions Runner Controller", + "matchPackagePatterns": ["gha-runner-scale-set"], + "matchDatasources": ["docker", "helm"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Flux Group"], + "groupName": "Flux", + "matchPackagePatterns": ["fluxcd"], + "matchDatasources": ["docker", "github-tags"], + "versioning": "semver", + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Intel Device Plugins Group"], + "groupName": "Intel-Device-Plugins", + "matchPackagePatterns": ["intel-device-plugins"], + "matchDatasources": ["helm"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Rook-Ceph Group"], + "groupName": "Rook-Ceph", + "matchPackagePatterns": ["rook.ceph"], + "matchDatasources": ["helm"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Talos Group"], + "groupName": "Talos", + "matchPackagePatterns": ["siderolabs/talosctl", "siderolabs/installer"], + "matchDatasources": ["docker"], + "group": { + "commitMessageTopic": "{{{groupName}}} 
diff --git a/.github/renovate/groups.json5 b/.github/renovate/groups.json5 new file mode 100644 index 000000000..79a05f8e7 --- /dev/null +++ b/.github/renovate/groups.json5 @@ -0,0 +1,66 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "description": ["1Password Connect Group"], + "groupName": "1Password Connect", + "matchPackagePatterns": ["1password/connect"], + "matchDatasources": ["docker"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Actions Runner Controller Group"], + "groupName": "Actions Runner Controller", + "matchPackagePatterns": ["gha-runner-scale-set"], + "matchDatasources": ["docker", "helm"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Flux Group"], + "groupName": "Flux", + "matchPackagePatterns": ["fluxcd"], + "matchDatasources": ["docker", "github-tags"], + "versioning": "semver", + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Intel Device Plugins Group"], + "groupName": "Intel-Device-Plugins", + "matchPackagePatterns": ["intel-device-plugins"], + "matchDatasources": ["helm"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Rook-Ceph Group"], + "groupName": "Rook-Ceph", + "matchPackagePatterns": ["rook.ceph"], + "matchDatasources": ["helm"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + }, + { + "description": ["Talos Group"], + "groupName": "Talos", + "matchPackagePatterns": ["siderolabs/talosctl", "siderolabs/installer"], + "matchDatasources": ["docker"], + "group": { + "commitMessageTopic": "{{{groupName}}} group" + }, + "separateMinorPatch": true + } + ] +} diff --git a/.github/renovate/labels.json5 b/.github/renovate/labels.json5 new file mode 100644 index 000000000..641ea6e98 --- /dev/null +++ b/.github/renovate/labels.json5 @@ -0,0 +1,37 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "matchUpdateTypes": ["major"], + "labels": ["type/major"] + }, + { + "matchUpdateTypes": ["minor"], + "labels": ["type/minor"] + }, + { + "matchUpdateTypes": ["patch"], + "labels": ["type/patch"] + }, + { + "matchUpdateTypes": ["digest"], + "labels": ["type/digest"] + }, + { + "matchDatasources": ["docker"], + "addLabels": ["renovate/container"] + }, + { + "matchDatasources": ["helm"], + "addLabels": ["renovate/helm"] + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "addLabels": ["renovate/github-release"] + }, + { + "matchManagers": ["github-actions"], + "addLabels": ["renovate/github-action"] + } + ] +} diff --git a/.github/renovate/packageRules.json5 b/.github/renovate/packageRules.json5 new file mode 100644 index 000000000..8ccd48652 --- /dev/null +++ b/.github/renovate/packageRules.json5 @@ -0,0 +1,17 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "description": ["Loose versioning for non-semver packages"], + "matchDatasources": ["docker"], + "matchPackagePatterns": ["cross-seed", "plex"], + "versioning": "loose" + }, + { + "description": ["Custom schedule for frequently updated packages"], + "matchDatasources": ["docker", "helm"], + "matchPackagePatterns": ["postgresql", "reloader"], + "schedule": ["on the first day of the month"] + } + ] +} diff --git a/.github/renovate/semanticCommits.json5 b/.github/renovate/semanticCommits.json5 new file mode 100644 index 000000000..0d88d8db6 --- /dev/null +++ b/.github/renovate/semanticCommits.json5 @@ -0,0 +1,105 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "packageRules": [ + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(container)!: ", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "container", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "container", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["digest"], + "semanticCommitType": "chore", + "semanticCommitScope": "container", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentDigestShort}} → {{newDigestShort}} )" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(helm)!: ", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "helm", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", 
"semanticCommitScope": "helm", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-release)!: ", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-release", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-release", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": "feat(github-action)!: ", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": "feat", + "semanticCommitScope": "github-action", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": "fix", + "semanticCommitScope": "github-action", + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "( {{currentVersion}} → {{newVersion}} )" + } + ] +} diff --git a/.github/workflows/flux-diff.yaml b/.github/workflows/flux-diff.yaml new file mode 100644 index 000000000..1877c56e6 --- /dev/null +++ b/.github/workflows/flux-diff.yaml @@ -0,0 +1,125 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Diff" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + changed-clusters: + name: Changed Clusters + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.changed-clusters.outputs.all_changed_and_modified_files }} + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + fetch-depth: 0 + + - name: Get Changed Clusters + id: changed-clusters + uses: tj-actions/changed-files@v45 + with: + files: kubernetes/** + dir_names: true + dir_names_max_depth: 2 + matrix: true + + - name: List All Changed Clusters + run: echo "${{ steps.changed-clusters.outputs.all_changed_and_modified_files }}" + + flux-diff: + name: Flux Diff + runs-on: ubuntu-latest + needs: ["changed-clusters"] + permissions: + pull-requests: write + strategy: + matrix: + paths: ${{ fromJSON(needs.changed-clusters.outputs.matrix) }} + resources: ["helmrelease", "kustomization"] + max-parallel: 4 + fail-fast: false + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + 
with: + token: "${{ steps.app-token.outputs.token }}" + path: pull + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + ref: "${{ github.event.repository.default_branch }}" + path: default + + - name: Diff Resources + uses: docker://ghcr.io/allenporter/flux-local:v6.0.2 + with: + args: >- + diff ${{ matrix.resources }} + --unified 6 + --path /github/workspace/pull/${{ matrix.paths }}/flux + --path-orig /github/workspace/default/${{ matrix.paths }}/flux + --strip-attrs "helm.sh/chart,checksum/config,app.kubernetes.io/version,chart" + --limit-bytes 10000 + --all-namespaces + --sources "home-kubernetes" + --output-file diff.patch + + - name: Generate Diff + id: diff + run: | + echo "diff<<EOF" >> $GITHUB_OUTPUT + cat diff.patch >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + echo "### Diff" >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + cat diff.patch >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + - if: ${{ steps.diff.outputs.diff != '' }} + name: Add comment + uses: mshick/add-pr-comment@v2 + with: + repo-token: "${{ steps.app-token.outputs.token }}" + message-id: "${{ github.event.pull_request.number }}/${{ matrix.paths }}/${{ matrix.resources }}" + message-failure: Diff was not successful + message: | + ```diff + ${{ steps.diff.outputs.diff }} + ``` + + # Summarize matrix https://github.community/t/status-check-for-a-matrix-jobs/127354/7 + flux-diff-success: + if: ${{ always() }} + needs: ["flux-diff"] + name: Flux Diff Successful + runs-on: ubuntu-latest + steps: + - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} + name: Check matrix status + run: exit 1 diff --git a/.github/workflows/flux-hr-sync.yaml b/.github/workflows/flux-hr-sync.yaml new file mode 100644 index 000000000..67887c0d9 --- /dev/null +++ b/.github/workflows/flux-hr-sync.yaml @@ -0,0 +1,98 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Helm Repository Sync" + +on: + workflow_dispatch: + inputs: + clusterName: + description: Cluster Name + default: main + required: true + helmRepoNamespace: + description: Helm Repository Namespace + default: flux-system + required: true + helmRepoName: + description: Helm Repository Name + required: true + pull_request: + branches: ["main"] + paths: ["kubernetes/**/helmrelease.yaml"] + +jobs: + sync: + name: Flux Helm Repository Sync + runs-on: ["gha-runner-scale-set"] + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + fetch-depth: 0 + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + shell: bash + run: brew install fluxcd/tap/flux yq + + - name: Write kubeconfig + id: kubeconfig + uses: timheuer/base64-to-file@v1 + with: + encodedString: "${{ secrets.KUBECONFIG }}" + fileName: kubeconfig + + - if: ${{ github.event.inputs.clusterName == '' && github.event.inputs.helmRepoNamespace == '' && github.event.inputs.helmRepoName == '' }} + name: Get Changed Files + id: changed-files + uses: tj-actions/changed-files@v45 + with: + files: kubernetes/**/helmrelease.yaml + safe_output: false + + - if: ${{ github.event.inputs.clusterName == '' && github.event.inputs.helmRepoNamespace == '' && 
github.event.inputs.helmRepoName == '' }} + name: List All Changed Files + run: echo "${{ steps.changed-files.outputs.all_changed_and_modified_files }}" + + - if: ${{ github.event.inputs.clusterName == '' && github.event.inputs.helmRepoNamespace == '' && github.event.inputs.helmRepoName == '' }} + name: Sync Helm Repository + env: + KUBECONFIG: "${{ steps.kubeconfig.outputs.filePath }}" + shell: bash + run: | + declare -a repos=() + for f in ${{ steps.changed-files.outputs.all_changed_and_modified_files }}; do + cluster_name=$(echo "${f}" | awk -F'/' '{print $2}') + repo_namespace="$(yq -r '.spec.chart.spec.sourceRef.namespace' "${f}")" + repo_name="$(yq -r '.spec.chart.spec.sourceRef.name' "${f}")" + repos+=("${cluster_name}:${repo_namespace}:${repo_name}") + done + mapfile -t repos < <(printf "%s\n" "${repos[@]}" | sort -u) + for r in "${repos[@]}"; do + IFS=':' read -r cluster_name repo_namespace repo_name <<< "${r}" + flux \ + --context ${cluster_name} \ + --namespace ${repo_namespace} \ + reconcile source helm ${repo_name} + done + + - if: ${{ github.event.inputs.clusterName != '' && github.event.inputs.helmRepoNamespace != '' && github.event.inputs.helmRepoName != '' }} + name: Sync Helm Repository + env: + KUBECONFIG: ${{ steps.kubeconfig.outputs.filePath }} + shell: bash + run: | + flux \ + --context ${{ github.event.inputs.clusterName }} \ + --namespace ${{ github.event.inputs.helmRepoNamespace }} \ + reconcile source helm ${{ github.event.inputs.helmRepoName }} diff --git a/.github/workflows/flux-image-test.yaml b/.github/workflows/flux-image-test.yaml new file mode 100644 index 000000000..e00efac8d --- /dev/null +++ b/.github/workflows/flux-image-test.yaml @@ -0,0 +1,152 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Flux Image Test" + +on: + pull_request: + branches: ["main"] + paths: ["kubernetes/**"] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +jobs: + changed-clusters: + name: Changed Clusters + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.changed-clusters.outputs.all_changed_and_modified_files }} + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + fetch-depth: 0 + + - name: Get Changed Clusters + id: changed-clusters + uses: tj-actions/changed-files@v45 + with: + files: kubernetes/** + dir_names: true + dir_names_max_depth: 2 + matrix: true + + - name: List All Changed Clusters + run: echo "${{ steps.changed-clusters.outputs.all_changed_and_modified_files }}" + + extract-images: + name: Extract Images + runs-on: ubuntu-latest + needs: ["changed-clusters"] + permissions: + pull-requests: write + strategy: + matrix: + paths: ${{ fromJSON(needs.changed-clusters.outputs.matrix) }} + max-parallel: 4 + fail-fast: false + outputs: + matrix: ${{ steps.extract-images.outputs.images }} + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Setup Homebrew + uses: Homebrew/actions/setup-homebrew@master + + - name: Setup Workflow Tools + shell: bash + run: brew install jo yq + + - name: Checkout Default Branch + uses: actions/checkout@v4 + with: + token: "${{ 
steps.app-token.outputs.token }}" + ref: "${{ github.event.repository.default_branch }}" + path: default + + - name: Checkout Pull Request Branch + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + path: pull + + - name: Gather Images in Default Branch + uses: docker://ghcr.io/allenporter/flux-local:v6.0.2 + with: + args: >- + get cluster + --path /github/workspace/default/${{ matrix.paths }}/flux + --enable-images + --output yaml + --output-file default.yaml + + - name: Gather Images in Pull Request Branch + uses: docker://ghcr.io/allenporter/flux-local:v6.0.2 + with: + args: >- + get cluster + --path /github/workspace/pull/${{ matrix.paths }}/flux + --enable-images + --output yaml + --output-file pull.yaml + + - name: Filter Default Branch Results + shell: bash + run: | + yq -r '[.. | .images? | select(. != null)] | flatten | sort | unique | .[]' \ + default.yaml > default.txt + + - name: Filter Pull Request Branch Results + shell: bash + run: | + yq -r '[.. | .images? | select(. != null)] | flatten | sort | unique | .[]' \ + pull.yaml > pull.txt + + - name: Compare Default and Pull Request Images + id: extract-images + shell: bash + run: | + images=$(jo -a $(grep -vf default.txt pull.txt)) + echo "images=${images}" >> $GITHUB_OUTPUT + echo "${images}" + echo "### Images" >> $GITHUB_STEP_SUMMARY + echo "${images}" | jq -r 'to_entries[] | "* \(.value)"' >> $GITHUB_STEP_SUMMARY + + test-images: + if: ${{ needs.extract-images.outputs.matrix != '[]' }} + name: Test images + runs-on: ubuntu-latest + needs: ["extract-images"] + strategy: + matrix: + images: ${{ fromJSON(needs.extract-images.outputs.matrix) }} + max-parallel: 4 + fail-fast: false + steps: + - name: Inspect Image + run: docker buildx imagetools inspect ${{ matrix.images }} + + # Summarize matrix https://github.community/t/status-check-for-a-matrix-jobs/127354/7 + test-images-success: + if: ${{ always() }} + needs: ["test-images"] + name: Test Images Successful + runs-on: ubuntu-latest + steps: + - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} + name: Check matrix status + run: exit 1 diff --git a/.github/workflows/label-sync.yaml b/.github/workflows/label-sync.yaml new file mode 100644 index 000000000..d1eab2683 --- /dev/null +++ b/.github/workflows/label-sync.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Label Sync" + +on: + workflow_dispatch: + push: + branches: ["main"] + paths: [".github/labels.yaml"] + schedule: + - cron: "0 0 * * *" # Every day at midnight + +permissions: + issues: write + contents: read + +jobs: + label-sync: + name: Label Sync + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + sparse-checkout: .github/labels.yaml + + - name: Sync Labels + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.yaml + delete-other-labels: true diff --git a/.github/workflows/labeler.yaml b/.github/workflows/labeler.yaml new file mode 100644 index 000000000..d658c1d96 --- /dev/null +++ b/.github/workflows/labeler.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Labeler" + +on: + workflow_dispatch: + pull_request_target: + branches: ["main"] + +jobs: + labeler: + name: Labeler + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + steps: + - name: Labeler + uses: actions/labeler@v5 + with: + configuration-path: .github/labeler.yaml diff 
--git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..ab809acf3 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,52 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Release" + +on: + workflow_dispatch: + schedule: + - cron: "0 0 1 * *" # 1st of every month at midnight + +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + + - name: Create Release + shell: bash + env: + GITHUB_TOKEN: "${{ steps.app-token.outputs.token }}" + run: | + # Retrieve previous release tag + previous_tag="$(gh release list --limit 1 | awk '{ print $1 }')" + previous_major="${previous_tag%%\.*}" + previous_minor="${previous_tag#*.}" + previous_minor="${previous_minor%.*}" + previous_patch="${previous_tag##*.}" + # Determine next release tag + next_major_minor="$(date +'%Y').$(date +'%-m')" + if [[ "${previous_major}.${previous_minor}" == "${next_major_minor}" ]]; then + echo "Month release already exists for year, incrementing patch number by 1" + next_patch="$((previous_patch + 1))" + else + echo "Month release does not exist for year, setting patch number to 0" + next_patch="0" + fi + # Create release + release_tag="${next_major_minor}.${next_patch}" + gh release create "${release_tag}" \ + --repo="${GITHUB_REPOSITORY}" \ + --title="${release_tag}" \ + --generate-notes diff --git a/.github/workflows/renovate.yaml b/.github/workflows/renovate.yaml new file mode 100644 index 000000000..39d379f69 --- /dev/null +++ b/.github/workflows/renovate.yaml @@ -0,0 +1,63 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json +name: "Renovate" + +on: + workflow_dispatch: + inputs: + dryRun: + description: Dry Run + default: "false" + required: false + logLevel: + description: Log Level + default: debug + required: false + version: + description: Renovate version + default: latest + required: false + schedule: + - cron: "0 * * * *" # Every hour + push: + branches: ["main"] + paths: + - .github/renovate.json5 + - .github/renovate/**.json5 + +concurrency: + group: ${{ github.workflow }}-${{ github.event.number || github.ref }} + cancel-in-progress: true + +env: + LOG_LEVEL: "${{ inputs.logLevel || 'debug' }}" + RENOVATE_AUTODISCOVER: true + RENOVATE_AUTODISCOVER_FILTER: "${{ github.repository }}" + RENOVATE_DRY_RUN: "${{ inputs.dryRun == 'true' }}" + RENOVATE_PLATFORM: github + RENOVATE_PLATFORM_COMMIT: true + WORKFLOW_RENOVATE_VERSION: "${{ inputs.version || 'latest' }}" + +jobs: + renovate: + name: Renovate + runs-on: ubuntu-latest + steps: + - name: Generate Token + uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: "${{ secrets.BOT_APP_ID }}" + private-key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" + + - name: Checkout + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + + - name: Renovate + uses: renovatebot/github-action@v41.0.3 + with: + configurationFile: .github/renovate.json5 + token: "${{ steps.app-token.outputs.token }}" + renovate-version: "${{ env.WORKFLOW_RENOVATE_VERSION }}"
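The workflow_dispatch inputs above (note dryRun is a string input, hence the quoted comparison) allow ad-hoc runs; a minimal sketch using the GitHub CLI, assuming gh is authenticated with access to this repository:

# Kick off a debug-level Renovate dry run from a local shell
gh workflow run renovate.yaml -f dryRun=true -f logLevel=debug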
diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..fadd74e4a --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +.private/ +.task/ +scripts/ +*.secret.env +*.secret.yaml +*.key +.DS_Store +Thumbs.db +.decrypted~* +kubeconfig +talosconfig diff --git a/.sops.yaml b/.sops.yaml new file mode 100644 index 000000000..1dfc27c9c --- /dev/null +++ b/.sops.yaml @@ -0,0 +1,26 @@ +--- +creation_rules: + # IMPORTANT: Keep this rule first + - path_regex: talos/.+\.secret(\.sops)?\.ya?ml + input_type: yaml + encrypted_regex: ^(token|crt|key|id|secret|secretboxEncryptionSecret|ca|urls|extraManifests)$ + mac_only_encrypted: true + key_groups: + - age: + - age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + - path_regex: kubernetes/.+\.secret(\.sops)?\.ya?ml + input_type: yaml + encrypted_regex: ^(data|stringData)$ + mac_only_encrypted: true + key_groups: + - age: + - age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + - path_regex: kubernetes/.+\.secret(\.sops)?\.env + input_type: env + mac_only_encrypted: true + key_groups: + - age: + - age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d +stores: + yaml: + indent: 2 diff --git a/.taskfiles/VolSync/Taskfile.yaml b/.taskfiles/VolSync/Taskfile.yaml new file mode 100644 index 000000000..987bf04b5 --- /dev/null +++ b/.taskfiles/VolSync/Taskfile.yaml @@ -0,0 +1,221 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: "3" + +# This taskfile is used to manage certain VolSync tasks for a given application; its limitations are described below. +# 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex) +# 2. ReplicationSource and ReplicationDestination are a Restic repository +# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet +# 4. Each application only has one PVC that is being replicated + +x-env: &env + app: "{{.app}}" + claim: "{{.claim}}" + controller: "{{.controller}}" + job: "{{.job}}" + ns: "{{.ns}}" + pgid: "{{.pgid}}" + previous: "{{.previous}}" + puid: "{{.puid}}" + +vars: + VOLSYNC_SCRIPTS_DIR: "{{.ROOT_DIR}}/.taskfiles/VolSync/scripts" + VOLSYNC_TEMPLATES_DIR: "{{.ROOT_DIR}}/.taskfiles/VolSync/templates" + +tasks: + + suspend-controller: + desc: Suspend the VolSync controller + summary: | + Args: + cluster: Cluster to run command against (required) + cmds: + - flux --context {{.cluster}} suspend ks volsync + - flux --context {{.cluster}} suspend hr -n volsync-system volsync + - kubectl --context {{.cluster}} -n volsync-system scale deployment volsync --replicas 0 + env: *env + requires: + vars: ["cluster"] + + list: + desc: List snapshots for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to list snapshots for (required) + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/list.tmpl.yaml) | kubectl --context {{.cluster}} apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container main + - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}} + env: *env + requires: + vars: ["cluster", "app"] + vars: + ns: '{{.ns | default "default"}}' + job: volsync-list-{{.app}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/list.tmpl.yaml + silent: true + + unlock: + desc: Unlock a Restic repository for an application + summary: | + Args: + cluster: Cluster to run command against (required) + 
ns: Namespace the PVC is in (default: default) + app: Application to unlock (required) + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/unlock.tmpl.yaml) | kubectl --context {{.cluster}} apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container minio + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container r2 + - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}} + env: *env + requires: + vars: ["cluster", "app"] + vars: + ns: '{{.ns | default "default"}}' + job: volsync-unlock-{{.app}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/unlock.tmpl.yaml + silent: true + + # To run backup jobs in parallel for all replicationsources: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1' + snapshot: + desc: Snapshot a PVC for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to snapshot (required) + cmds: + - kubectl --context {{.cluster}} -n {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{.now}}"}}}' + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + env: *env + requires: + vars: ["cluster", "app"] + vars: + now: '{{now | date "150405"}}' + ns: '{{.ns | default "default"}}' + job: volsync-src-{{.app}} + controller: + sh: true && {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh {{.app}} {{.ns}} {{.cluster}} + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - kubectl --context {{.cluster}} -n {{.ns}} get replicationsources {{.app}} + + # To run restore jobs in parallel for all replicationdestinations: + # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore app=$0 ns=$1' + restore: + desc: Restore a PVC for an application + summary: | + Args: + cluster: Cluster to run command against (required) + ns: Namespace the PVC is in (default: default) + app: Application to restore (required) + previous: Previous number of snapshots to restore (default: 2) + cmds: + - { task: .suspend, vars: *env } + - { task: .wipe, vars: *env } + - { task: .restore, vars: *env } + - { task: .resume, vars: *env } + env: *env + requires: + vars: ["cluster", "app"] + vars: + ns: '{{.ns | default "default"}}' + previous: '{{.previous | default 2}}' + controller: + sh: "{{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh {{.app}} {{.ns}}" + claim: + sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.sourcePVC}" + puid: + sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsUser}" + pgid: + sh: kubectl --context {{.cluster}} -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}" + preconditions: + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/which-controller.sh + - test -f {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh + - test -f 
{{.VOLSYNC_TEMPLATES_DIR}}/replicationdestination.tmpl.yaml + - test -f {{.VOLSYNC_TEMPLATES_DIR}}/wipe.tmpl.yaml + + cleanup: + desc: Delete volume populator PVCs in all namespaces + summary: | + Args: + cluster: Cluster to run command against (required) + cmds: + - for: { var: dest } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl --context {{.cluster}} delete pvc -n {{ $items._0 }} {{ $items._1 }} + - for: { var: cache } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl --context {{.cluster}} delete pvc -n {{ $items._0 }} {{ $items._1 }} + - for: { var: snaps } + cmd: | + {{- $items := (split "/" .ITEM) }} + kubectl --context {{.cluster}} delete volumesnapshot -n {{ $items._0 }} {{ $items._1 }} + env: *env + requires: + vars: ["cluster"] + vars: + dest: + sh: kubectl --context {{.cluster}} get pvc --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}' + cache: + sh: kubectl --context {{.cluster}} get pvc --all-namespaces --no-headers | grep "dst-cache" | awk '{print $1 "/" $2}' + snaps: + sh: kubectl --context {{.cluster}} get volumesnapshot --all-namespaces --no-headers | grep "dst-dest" | awk '{print $1 "/" $2}' + + # Suspend the Flux ks and hr + .suspend: + internal: true + cmds: + - flux --context {{.cluster}} -n flux-system suspend kustomization {{.app}} + - flux --context {{.cluster}} -n {{.ns}} suspend helmrelease {{.app}} + - kubectl --context {{.cluster}} -n {{.ns}} scale {{.controller}} --replicas 0 + - kubectl --context {{.cluster}} -n {{.ns}} wait pod --for delete --selector="app.kubernetes.io/name={{.app}}" --timeout=2m + env: *env + + # Wipe the PVC of all data + .wipe: + internal: true + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/wipe.tmpl.yaml) | kubectl --context {{.cluster}} apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + - kubectl --context {{.cluster}} -n {{.ns}} logs job/{{.job}} --container main + - kubectl --context {{.cluster}} -n {{.ns}} delete job {{.job}} + env: *env + vars: + job: volsync-wipe-{{.app}} + + # Create VolSync replicationdestination CR to restore data + .restore: + internal: true + cmds: + - envsubst < <(cat {{.VOLSYNC_TEMPLATES_DIR}}/replicationdestination.tmpl.yaml) | kubectl --context {{.cluster}} apply -f - + - bash {{.VOLSYNC_SCRIPTS_DIR}}/wait-for-job.sh {{.job}} {{.ns}} {{.cluster}} + - kubectl --context {{.cluster}} -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m + - kubectl --context {{.cluster}} -n {{.ns}} delete replicationdestination {{.app}} + env: *env + vars: + job: volsync-dst-{{.app}} + + # Resume Flux ks and hr + .resume: + internal: true + cmds: + - flux --context {{.cluster}} -n {{.ns}} resume helmrelease {{.app}} + - flux --context {{.cluster}} -n flux-system resume kustomization {{.app}} + env: *env diff --git a/.taskfiles/VolSync/templates/list.tmpl.yaml b/.taskfiles/VolSync/templates/list.tmpl.yaml new file mode 100644 index 000000000..201e0ea24 --- /dev/null +++ b/.taskfiles/VolSync/templates/list.tmpl.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: main + image: docker.io/restic/restic:0.17.3 + args: ["snapshots"] + envFrom: + - secretRef: + name: ${app}-volsync-secret + resources: {} diff --git 
a/.taskfiles/VolSync/templates/replicationdestination.tmpl.yaml b/.taskfiles/VolSync/templates/replicationdestination.tmpl.yaml new file mode 100644 index 000000000..b1c7d4360 --- /dev/null +++ b/.taskfiles/VolSync/templates/replicationdestination.tmpl.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: ${app} + namespace: ${ns} +spec: + trigger: + manual: restore-once + restic: + repository: ${app}-volsync-secret + destinationPVC: ${claim} + copyMethod: Direct + accessModes: ["ReadWriteOnce"] + # IMPORTANT NOTE: + # Set to the last X number of snapshots to restore from + previous: ${previous} + # OR: + # IMPORTANT NOTE: + # On bootstrap set `restoreAsOf` to the time the old cluster was destroyed. + # This will essentially prevent volsync from trying to restore a backup + # from an application that started with default data in the PVC. + # Do not restore snapshots made after the following RFC3339 Timestamp. + # date --rfc-3339=seconds (--utc) + # restoreAsOf: "2022-12-10T16:00:00-05:00" + moverSecurityContext: + runAsUser: ${puid} + runAsGroup: ${pgid} + fsGroup: ${pgid} diff --git a/.taskfiles/VolSync/templates/unlock.tmpl.yaml b/.taskfiles/VolSync/templates/unlock.tmpl.yaml new file mode 100644 index 000000000..bf2bb9e89 --- /dev/null +++ b/.taskfiles/VolSync/templates/unlock.tmpl.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: minio + image: docker.io/restic/restic:0.17.3 + args: ["unlock", "--remove-all"] + envFrom: + - secretRef: + name: ${app}-volsync-secret + resources: {} + - name: r2 + image: docker.io/restic/restic:0.17.3 + args: ["unlock", "--remove-all"] + envFrom: + - secretRef: + name: ${app}-volsync-r2-secret + resources: {} diff --git a/.taskfiles/VolSync/templates/wipe.tmpl.yaml b/.taskfiles/VolSync/templates/wipe.tmpl.yaml new file mode 100644 index 000000000..ffc1cc75a --- /dev/null +++ b/.taskfiles/VolSync/templates/wipe.tmpl.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: ${job} + namespace: ${ns} +spec: + ttlSecondsAfterFinished: 3600 + template: + spec: + automountServiceAccountToken: false + restartPolicy: OnFailure + containers: + - name: main + image: docker.io/library/alpine:latest + command: ["/bin/sh", "-c", "cd /config; find . 
-delete"] + volumeMounts: + - name: config + mountPath: /config + securityContext: + privileged: true + resources: {} + volumes: + - name: config + persistentVolumeClaim: + claimName: ${claim} diff --git a/.taskfiles/bootstrap/Taskfile.yaml b/.taskfiles/bootstrap/Taskfile.yaml new file mode 100644 index 000000000..540e56980 --- /dev/null +++ b/.taskfiles/bootstrap/Taskfile.yaml @@ -0,0 +1,91 @@ + +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: '3' + +vars: + KUBERNETES_VERSION: + sh: yq 'select(document_index == 1).spec.postBuild.substitute.KUBERNETES_VERSION' {{.CLUSTER_DIR}}/apps/system-upgrade/system-upgrade-controller/ks.yaml + TALOS_CONTROLLER: + sh: talosctl config info --output json | jq --raw-output '.endpoints[]' | shuf -n 1 + TALOS_SCHEMATIC_ID: + sh: yq 'select(document_index == 1).spec.postBuild.substitute.TALOS_SCHEMATIC_ID' {{.CLUSTER_DIR}}/apps/system-upgrade/system-upgrade-controller/ks.yaml + TALOS_VERSION: + sh: yq 'select(document_index == 1).spec.postBuild.substitute.TALOS_VERSION' {{.CLUSTER_DIR}}/apps/system-upgrade/system-upgrade-controller/ks.yaml + +tasks: + + kubernetes: + desc: Bootstrap a Talos Kubernetes cluster backed by flux and sops + prompt: Bootstrap a Talos Kubernetes cluster ... continue? + summary: | + CLUSTER: Cluster to run command against (default: main) + vars: &vars + CLUSTER: '{{.CLUSTER}}' + cmds: + - { task: apply-config, vars: *vars } + - { task: etcd, vars: *vars } + - { task: conf, vars: *vars } + - { task: apps, vars: *vars } + - { task: flux, vars: *vars } + requires: + vars: ['CLUSTER'] + preconditions: + - talosctl config info &>/dev/null + - test -f {{.CLUSTER_DIR}}/talosconfig + apply-config: + internal: true + cmd: | + export TALOS_VERSION={{.TALOS_VERSION}} TALOS_SCHEMATIC_ID={{.TALOS_SCHEMATIC_ID}} KUBERNETES_VERSION={{.KUBERNETES_VERSION}} + sops --decrypt {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml | \ + /usr/local/bin/envsubst | \ + talosctl apply-config --insecure --nodes {{.HOSTNAME}} --file /dev/stdin + env: *vars + requires: + vars: ['CLUSTER', 'HOSTNAME'] + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - test -f {{.CLUSTER_DIR}}/bootstrap/talos/assets/{{.HOSTNAME}}.secret.sops.yaml + + etcd: + internal: true + cmd: until talosctl --nodes {{.TALOS_CONTROLLER}} bootstrap; do sleep 10; done + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - talosctl config info &>/dev/null + + conf: + internal: true + cmd: talosctl kubeconfig --nodes {{.TALOS_CONTROLLER}} --force --force-context-name {{.CLUSTER}} {{.CLUSTER_DIR}} + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - talosctl config info &>/dev/null + + apps: + internal: true + cmds: + - until kubectl wait --for=condition=Ready=False nodes --all --timeout=10m; do sleep 10; done + - kubectl label namespace kube-system topolvm.io/webhook=ignore + - helmfile --quiet --file {{.CLUSTER_DIR}}/bootstrap/helmfile.yaml apply --skip-diff-on-install --suppress-diff + - until kubectl wait --for=condition=Ready nodes --all --timeout=10m; do sleep 10; done + preconditions: + - test -f {{.CLUSTER_DIR}}/talosconfig + - test -f {{.CLUSTER_DIR}}/bootstrap/helmfile.yaml + - talosctl config info &>/dev/null + + flux: + internal: true + cmds: + - kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/bootstrap/flux + - sops --decrypt {{.CLUSTER_DIR}}/bootstrap/flux/age-key.secret.sops.yaml | kubectl apply --server-side --filename - + - sops --decrypt {{.CLUSTER_DIR}}/bootstrap/flux/deploy-key.secret.sops.yaml 
| kubectl apply --server-side --filename - + - sops --decrypt {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.secret.sops.yaml | kubectl apply --server-side --filename - + - kubectl apply --server-side --kustomize {{.CLUSTER_DIR}}/flux/config + preconditions: + - test -f {{.ROOT_DIR}}/age.key + - test -f {{.CLUSTER_DIR}}/bootstrap/flux/age-key.secret.sops.yaml + - test -f {{.CLUSTER_DIR}}/bootstrap/flux/deploy-key.secret.sops.yaml + - test -f {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.secret.sops.yaml + - sops filestatus {{.CLUSTER_DIR}}/bootstrap/flux/age-key.secret.sops.yaml | jq --exit-status '.encrypted' + - sops filestatus {{.CLUSTER_DIR}}/bootstrap/flux/deploy-key.secret.sops.yaml | jq --exit-status '.encrypted' + - sops filestatus {{.CLUSTER_DIR}}/flux/vars/cluster-secrets.secret.sops.yaml | jq --exit-status '.encrypted' diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..5706d024c --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,14 @@ +{ + "recommendations": [ + "albert.TabOut", + "britesnow.vscode-toggle-quotes", + "fcrespo82.markdown-table-formatter", + "mikestead.dotenv", + "mitchdenny.ecdc", + "redhat.vscode-yaml", + "signageos.signageos-vscode-sops", + "will-stone.in-any-case", + "BriteSnow.vscode-toggle-quotes", + "PKief.material-icon-theme", + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..9e4581a4a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,57 @@ +{ + "editor.bracketPairColorization.enabled": true, + "editor.fontFamily": "FiraCode Nerd Font", + "editor.fontLigatures": true, + "editor.guides.bracketPairs": true, + "editor.guides.bracketPairsHorizontal": true, + "editor.guides.highlightActiveBracketPair": true, + "editor.hover.delay": 1500, + "editor.rulers": [ + 100 + ], + "editor.stickyScroll.enabled": false, + "explorer.autoReveal": false, + "files.associations": { + "**/*.json5": "jsonc" + }, + "files.trimTrailingWhitespace": true, + "material-icon-theme.files.associations": { + // TODO: https://github.com/PKief/vscode-material-icon-theme/issues/330 + "*.secret.sops.env": "lock", + "*.secret.sops.yaml": "lock" + }, + "material-icon-theme.folders.associations": { + // top level + ".archive": "archive", + // .github + ".github/workflows": "robot", + // namespaces + "actions-runner-system": "github", + "cert-manager": "guard", + "default": "home", + "digester-system": "hook", + "external-secrets": "secure", + "flux-system": "pipe", + "kube-system": "kubernetes", + "monitoring": "event", + "network": "connection", + "networking": "connection", + "observability": "event", + "rook-ceph": "base", + "storage": "dump", + "system-upgrade": "update", + "tools": "tools", + "volsync": "aws" + }, + "sops.defaults.ageKeyFile": "age.key", + "vs-kubernetes": { + "vs-kubernetes.kubeconfig": "./kubeconfig", + "vs-kubernetes.knownKubeconfigs": [ + "./kubeconfig" + ] + }, + "yaml.schemaStore.enable": true, + "yaml.schemas": { + "kubernetes": "./kubernetes/**/*.yaml" + } +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d919bd04f --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Georgi Panov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to 
permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 000000000..069fecc23 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# home-ops \ No newline at end of file diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100644 index 000000000..c7ee45817 --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://taskfile.dev/schema.json +version: '3' + +vars: + CLUSTER: '{{.CLUSTER | default "main"}}' + CLUSTER_DIR: '{{.ROOT_DIR}}/kubernetes/{{.CLUSTER}}' + +env: + KUBECONFIG: '{{.CLUSTER_DIR}}/kubeconfig' + TALOSCONFIG: '{{.CLUSTER_DIR}}/talosconfig' + SOPS_AGE_KEY_FILE: '{{.ROOT_DIR}}/age.key' + +includes: + bootstrap: .taskfiles/bootstrap + volsync: .taskfiles/VolSync/Taskfile.yaml + +tasks: + + default: task --list diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml new file mode 100644 index 000000000..986ef1f80 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/externalsecret.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: actions-runner-controller-auth +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: actions-runner-controller-auth-secret + template: + engineVersion: v2 + data: + ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID }} + ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID }} + ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY: |- + {{ .ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY }} + dataFrom: + - extract: + key: secrets/actions-runner-controller diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml new file mode 100644 index 000000000..9979dd8dc --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/helmrelease.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gha-runner-scale-set-controller +spec: + interval: 30m + chart: + spec: + chart: gha-runner-scale-set-controller + version: 0.9.3 + sourceRef: + kind: HelmRepository + name: actions-runner-controller + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: 
CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: gha-runner-scale-set-controller diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml new file mode 100644 index 000000000..28039c9a0 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gha-runner-scale-set-controller + namespace: flux-system +spec: + targetNamespace: actions-runner-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/actions-runner-system/gha-runner-scale-set-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml new file mode 100644 index 000000000..d6c7afc65 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/helmrelease.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gha-runner-scale-set +spec: + interval: 30m + chart: + spec: + chart: gha-runner-scale-set + version: 0.9.3 + sourceRef: + kind: HelmRepository + name: actions-runner-controller + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: gha-runner-scale-set-controller + namespace: actions-runner-system + valuesFrom: + - targetPath: githubConfigSecret.github_app_id + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_APP_ID + - targetPath: githubConfigSecret.github_app_installation_id + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_INSTALLATION_ID + - targetPath: githubConfigSecret.github_app_private_key + kind: Secret + name: actions-runner-controller-auth-secret + valuesKey: ACTION_RUNNER_CONTROLLER_GITHUB_PRIVATE_KEY + values: + nameOverride: gha-runner-scale-set + runnerScaleSetName: gha-runner-scale-set + githubConfigUrl: https://github.com/Darkfella91/home-ops + minRunners: 1 + maxRunners: 6 + containerMode: + type: dind + template: + spec: + containers: + - name: runner + image: 
ghcr.io/onedr0p/actions-runner:2.321.0@sha256:d968199e3772ef831c34eb8edd495ef9eb99339a2f7176d4f1774f252f7903fb + command: ["/home/runner/run.sh"] + controllerServiceAccount: + name: gha-runner-scale-set-controller + namespace: actions-runner-system diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml new file mode 100644 index 000000000..ce84014a3 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + diff --git a/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml new file mode 100644 index 000000000..533dc04e4 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gha-runner-scale-set + namespace: flux-system +spec: + targetNamespace: actions-runner-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/actions-runner-system/gha-runner-scale-set/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/actions-runner-system/kustomization.yaml b/kubernetes/main/apps/actions-runner-system/kustomization.yaml new file mode 100644 index 000000000..98183e38e --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./gha-runner-scale-set-controller/ks.yaml + - ./gha-runner-scale-set/ks.yaml diff --git a/kubernetes/main/apps/actions-runner-system/namespace.yaml b/kubernetes/main/apps/actions-runner-system/namespace.yaml new file mode 100644 index 000000000..7bdef02e2 --- /dev/null +++ b/kubernetes/main/apps/actions-runner-system/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: actions-runner-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: actions-runner-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: actions-runner-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - 
"error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml new file mode 100644 index 000000000..8e3e5ac23 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/helm-values.yaml @@ -0,0 +1,7 @@ +installCRDs: true +dns01RecursiveNameservers: https://1.1.1.1:443/dns-query,https://1.0.0.1:443/dns-query +dns01RecursiveNameserversOnly: true +prometheus: + enabled: true + servicemonitor: + enabled: true diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml new file mode 100644 index 000000000..0001e424b --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cert-manager +spec: + interval: 30m + chart: + spec: + chart: cert-manager + version: v1.16.1 + sourceRef: + kind: HelmRepository + name: jetstack + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: cert-manager-helm-values diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml new file mode 100644 index 000000000..fcccbb92a --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml + - ./rbac.yaml +configMapGenerator: + - name: cert-manager-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml new file mode 100644 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml new file mode 100644 index 000000000..ae08bb147 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/prometheusrule.yaml @@ -0,0 +1,68 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cert-manager-rules +spec: + groups: + - name: cert-manager + rules: + - alert: CertManagerAbsent + expr: | + absent(up{job="cert-manager"}) + for: 15m + labels: + severity: critical + annotations: + description: + "New certificates will not be able to be minted, and existing + ones can't be renewed until cert-manager is back." 
+ runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent + summary: "Cert Manager has disappeared from Prometheus service discovery." + - name: certificates + rules: + - alert: CertManagerCertExpirySoon + expr: | + avg by (exported_namespace, namespace, name) ( + certmanager_certificate_expiration_timestamp_seconds - time()) + < (21 * 24 * 3600) + for: 15m + labels: + severity: warning + annotations: + description: + "The domain that this cert covers will be unavailable after + {{ $value | humanizeDuration }}. Clients using endpoints that this cert + protects will start to fail in {{ $value | humanizeDuration }}." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon + summary: + "The cert {{ $labels.name }} is {{ $value | humanizeDuration }} + from expiry; it should have renewed over a week ago." + - alert: CertManagerCertNotReady + expr: | + max by (name, exported_namespace, namespace, condition) ( + certmanager_certificate_ready_status{condition!="True"} == 1) + for: 15m + labels: + severity: critical + annotations: + description: + "This certificate has not been ready to serve traffic for at least + 15m. If the cert is being renewed or there is another valid cert, the ingress + controller _may_ be able to serve that instead." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready + summary: "The cert {{ $labels.name }} is not ready to serve traffic." + - alert: CertManagerHittingRateLimits + expr: | + sum by (host) (rate(certmanager_http_acme_client_request_count{status="429"}[5m])) + > 0 + for: 15m + labels: + severity: critical + annotations: + description: + "Depending on the rate limit, cert-manager may be unable to generate + certificates for up to a week." + runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits + summary: "Cert manager hitting LetsEncrypt rate limits." 
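The 21-day expression in CertManagerCertExpirySoon can be evaluated ad hoc before relying on the alert; a minimal sketch, assuming Prometheus is reachable as svc/prometheus-operated in the observability namespace (both names are assumptions, not defined in this commit):

# Forward the Prometheus API locally, then run the alert expression;
# any certificate within 21 days of expiry prints with its seconds remaining.
kubectl -n observability port-forward svc/prometheus-operated 9090:9090 &
curl -s http://127.0.0.1:9090/api/v1/query \
  --data-urlencode 'query=avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600)' |
  jq -r '.data.result[] | "\(.metric.name): \(.value[1])s left"'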
diff --git a/kubernetes/main/apps/cert-manager/cert-manager/app/rbac.yaml b/kubernetes/main/apps/cert-manager/cert-manager/app/rbac.yaml new file mode 100644 index 000000000..e0e7e2c5a --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/app/rbac.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: vault-issuer + namespace: cert-manager +rules: + - apiGroups: [''] + resources: ['serviceaccounts/token'] + resourceNames: ['vault-issuer'] + verbs: ['create'] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: vault-issuer + namespace: cert-manager +subjects: + - kind: ServiceAccount + name: cert-manager + namespace: cert-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vault-issuer diff --git a/kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml b/kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml new file mode 100644 index 000000000..d2751201c --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/issuers/externalsecret.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: zerossl-credentials +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: zerossl-credentials + template: + engineVersion: v2 + data: + - secretKey: CF_API_TOKEN + remoteRef: + key: secrets/cloudflare + property: CF_API_TOKEN + - secretKey: EAB_HMAC_KEY + remoteRef: + key: secrets/zerossl + property: EAB_HMAC_KEY diff --git a/kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml b/kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml new file mode 100644 index 000000000..f6e3afa85 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/issuers/issuers.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: zerossl-prod +spec: + acme: + server: https://acme.zerossl.com/v2/DV90 + privateKeySecretRef: + name: zerossl-prod + externalAccountBinding: + keyID: ${EAB_KEY_ID} + keySecretRef: + name: &secret zerossl-credentials + key: EAB_HMAC_KEY + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: *secret + key: CF_API_TOKEN +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: vault-issuer +spec: + vault: + path: pki_iss/sign/kubernetes-cluster + server: https://vault.${PUBLIC_DOMAIN}:8200 + auth: + kubernetes: + role: issuer + mountPath: /v1/auth/kubernetes + serviceAccountRef: + name: vault-issuer diff --git a/kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml b/kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml new file mode 100644 index 000000000..d6ac943fc --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/issuers/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./issuers.yaml diff --git a/kubernetes/main/apps/cert-manager/cert-manager/ks.yaml b/kubernetes/main/apps/cert-manager/cert-manager/ks.yaml new file mode 100644 index 000000000..af9134e15 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/cert-manager/ks.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/cert-manager/cert-manager/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cert-manager-issuers + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cert-manager + - name: external-secrets-stores + path: ./kubernetes/main/apps/cert-manager/cert-manager/issuers + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml b/kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml new file mode 100644 index 000000000..4766d9f85 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/app/certificates.yaml @@ -0,0 +1,29 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cert-manager.io/certificate_v1.json +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: darkfellanet +spec: + secretName: darkfellanet-tls + issuerRef: + name: zerossl-prod + kind: ClusterIssuer + commonName: ${PUBLIC_DOMAIN} + dnsNames: + - ${PUBLIC_DOMAIN} + - "*.${PUBLIC_DOMAIN}" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cert-manager.io/certificate_v1.json +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: keycloak-tls +spec: + secretName: keycloak-tls + issuerRef: + name: zerossl-prod + kind: ClusterIssuer + dnsNames: + - auth.${PUBLIC_DOMAIN} + - auth-admin.${PUBLIC_DOMAIN} diff --git a/kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml b/kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml new file mode 100644 index 000000000..a2b5d2050 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./certificates.yaml + - ./pushsecret.yaml diff --git a/kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml b/kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml new file mode 100644 index 000000000..157ad2f83 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/app/pushsecret.yaml @@ -0,0 +1,58 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/pushsecret_v1alpha1.json +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + name: darkfellanet-tls +spec: + secretStoreRefs: + - name: vault-backend + kind: ClusterSecretStore + selector: + secret: + name: darkfellanet-tls + template: + engineVersion: v2 + data: + tls.crt: '{{ index . "tls.crt" | b64enc }}' + tls.key: '{{ index . 
"tls.key" | b64enc }}' + data: + - match: + secretKey: &key tls.crt + remoteRef: + remoteKey: certificates/darkfellanet + property: *key + - match: + secretKey: &key tls.key + remoteRef: + remoteKey: certificates/darkfellanet + property: *key +--- +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/pushsecret_v1alpha1.json +apiVersion: external-secrets.io/v1alpha1 +kind: PushSecret +metadata: + name: keycloak-tls +spec: + secretStoreRefs: + - name: vault-backend + kind: ClusterSecretStore + selector: + secret: + name: keycloak-tls + template: + engineVersion: v2 + data: + tls.crt: '{{ index . "tls.crt" | b64enc }}' + tls.key: '{{ index . "tls.key" | b64enc }}' + data: + - match: + secretKey: &key tls.crt + remoteRef: + remoteKey: certificates/keycloak + property: *key + - match: + secretKey: &key tls.key + remoteRef: + remoteKey: certificates/keycloak + property: *key diff --git a/kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml b/kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml new file mode 100644 index 000000000..21f8705e8 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/import/clusterexternalsecret.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/clusterexternalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: darkfellanet-tls +spec: + externalSecretName: darkfellanet-tls + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - network + - vault + refreshTime: 5m + externalSecretSpec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: darkfellanet-tls + creationPolicy: Orphan + template: + engineVersion: v2 + type: kubernetes.io/tls + metadata: + annotations: + cert-manager.io/alt-names: '*.${PUBLIC_DOMAIN},${PUBLIC_DOMAIN}' + cert-manager.io/certificate-name: darkfellanet + cert-manager.io/common-name: ${PUBLIC_DOMAIN} + cert-manager.io/ip-sans: "" + cert-manager.io/issuer-group: "" + cert-manager.io/issuer-kind: ClusterIssuer + cert-manager.io/issuer-name: zerossl-prod + cert-manager.io/uri-sans: "" + labels: + controller.cert-manager.io/fao: "true" + dataFrom: + - extract: + key: secrets/certificates/darkfellanet + decodingStrategy: Auto diff --git a/kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml b/kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml new file mode 100644 index 000000000..fb80e7cda --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/import/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./clusterexternalsecret.yaml diff --git a/kubernetes/main/apps/cert-manager/certificates/ks.yaml b/kubernetes/main/apps/cert-manager/certificates/ks.yaml new file mode 100644 index 000000000..3ad003b6c --- /dev/null +++ b/kubernetes/main/apps/cert-manager/certificates/ks.yaml @@ -0,0 +1,46 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app certificates-import + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + 
app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/cert-manager/certificates/import + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app certificates + namespace: flux-system +spec: + targetNamespace: cert-manager + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: certificates-import + - name: cert-manager-issuers + - name: external-secrets-stores + path: ./kubernetes/main/apps/cert-manager/certificates/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/cert-manager/kustomization.yaml b/kubernetes/main/apps/cert-manager/kustomization.yaml new file mode 100644 index 000000000..890b1baa9 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cert-manager/ks.yaml + - ./certificates/ks.yaml diff --git a/kubernetes/main/apps/cert-manager/namespace.yaml b/kubernetes/main/apps/cert-manager/namespace.yaml new file mode 100644 index 000000000..9e6a66025 --- /dev/null +++ b/kubernetes/main/apps/cert-manager/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: cert-manager +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: cert-manager +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml b/kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml new file mode 100644 index 000000000..c6711b6e1 --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/app/externalsecret.yaml @@ -0,0 +1,34 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: cloudnative-pg-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: cloudnative-pg-secret + template: + engineVersion: v2 + metadata: + labels: + cnpg.io/reload: "true" + data: + - secretKey: username + remoteRef: + key: 
secrets/cloudnative-pg + property: POSTGRES_SUPER_USER + - secretKey: password + remoteRef: + key: secrets/cloudnative-pg + property: POSTGRES_SUPER_PASS + - secretKey: CF_ACCESS_KEY_ID + remoteRef: + key: secrets/cloudflare + property: CF_ACCESS_KEY_ID + - secretKey: CF_SECRET_ACCESS_KEY + remoteRef: + key: secrets/cloudflare + property: CF_SECRET_ACCESS_KEY diff --git a/kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml b/kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml new file mode 100644 index 000000000..44439a68e --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/app/helmrelease.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cloudnative-pg +spec: + interval: 30m + chart: + spec: + chart: cloudnative-pg + version: 0.22.1 + sourceRef: + kind: HelmRepository + name: cloudnative-pg + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + crds: + create: true + monitoring: + podMonitorEnabled: false + grafanaDashboard: + create: true diff --git a/kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml b/kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml new file mode 100644 index 000000000..cc4edef36 --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/cluster17.yaml @@ -0,0 +1,75 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/postgresql.cnpg.io/cluster_v1.json +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: postgres17 +spec: + instances: 2 + imageName: ghcr.io/cloudnative-pg/postgresql:17.0-18@sha256:16531710c07d8b3eb7ff86a238815eaf8214a578a19c869c0214b895467d29d0 + primaryUpdateStrategy: unsupervised + storage: + pvcTemplate: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + resizeInUseVolumes: true + walStorage: + pvcTemplate: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi + superuserSecret: + name: &secret cloudnative-pg-secret + enableSuperuserAccess: true + postgresql: + parameters: + max_connections: "400" + shared_buffers: 256MB + nodeMaintenanceWindow: + inProgress: false + reusePVC: true + resources: + requests: + cpu: 500m + limits: + memory: 4Gi + monitoring: + enablePodMonitor: true + backup: + retentionPolicy: 30d + barmanObjectStore: &barmanObjectStore + data: + compression: bzip2 + encryption: AES256 + wal: + compression: bzip2 + encryption: AES256 + maxParallel: 8 + destinationPath: s3://backups/cloudnative-pg/ + endpointURL: ${S3URL} + # Note: serverName version needs to be incremented + # when recovering from an existing cnpg cluster + serverName: &currentCluster postgres17-v2 + s3Credentials: + accessKeyId: + name: *secret + key: CF_ACCESS_KEY_ID + secretAccessKey: + name: *secret + key: CF_SECRET_ACCESS_KEY + # 
Note: previousCluster needs to be set to the name of the previous + # cluster when recovering from an existing cnpg cluster + bootstrap: + recovery: + source: &previousCluster postgres17-v1 + # Note: externalClusters is needed when recovering from an existing cnpg cluster + externalClusters: + - name: *previousCluster + barmanObjectStore: + <<: *barmanObjectStore + serverName: *previousCluster diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml new file mode 100644 index 000000000..3a0723a5a --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/gatus.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: postgres-gatus-ep + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: postgres17 + group: infrastructure + url: tcp://postgres17-rw.database.svc.cluster.local:5432 + interval: 1m + ui: + hide-url: true + hide-hostname: true + conditions: + - "[CONNECTED] == true" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml new file mode 100644 index 000000000..4bbea0d6c --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./scheduledbackup.yaml + - ./prometheusrule.yaml + - ./cluster17.yaml + - ./gatus.yaml diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml new file mode 100644 index 000000000..9c1d6a8db --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/prometheusrule.yaml @@ -0,0 +1,67 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: cloudnative-pg-rules + labels: + prometheus: k8s + role: alert-rules +spec: + groups: + - name: cloudnative-pg.rules + rules: + - alert: LongRunningTransaction + annotations: + description: Pod {{ $labels.pod }} is taking more than 5 minutes (300 seconds) for a query. + summary: A query is taking longer than 5 minutes. + expr: |- + cnpg_backends_max_tx_duration_seconds > 300 + for: 1m + labels: + severity: warning + - alert: BackendsWaiting + annotations: + description: Pod {{ $labels.pod }} has been waiting for longer than 5 minutes + summary: A backend has been waiting for longer than 5 minutes + expr: |- + cnpg_backends_waiting_total > 300 + for: 1m + labels: + severity: warning + - alert: PGDatabase + annotations: + description: Over 150,000,000 transactions from frozen xid on pod {{ $labels.pod }} + summary: Number of transactions from the frozen XID to the current one + expr: |- + cnpg_pg_database_xid_age > 150000000 + for: 1m + labels: + severity: warning + - alert: PGReplication + annotations: + description: Standby is lagging behind by over 300 seconds (5 minutes) + summary: The standby is lagging behind the primary + expr: |- + cnpg_pg_replication_lag > 300 + for: 1m + labels: + severity: warning + - alert: LastFailedArchiveTime + annotations: + description: Archiving failed for {{ $labels.pod }} + summary: Checks the last time archiving failed. 
Will be < 0 when it has not failed. + expr: |- + (cnpg_pg_stat_archiver_last_failed_time - cnpg_pg_stat_archiver_last_archived_time) > 1 + for: 1m + labels: + severity: warning + - alert: DatabaseDeadlockConflicts + annotations: + description: There are over 10 deadlock conflicts in {{ $labels.pod }} + summary: Checks the number of database conflicts + expr: |- + cnpg_pg_stat_database_deadlocks > 10 + for: 1m + labels: + severity: warning diff --git a/kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml b/kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml new file mode 100644 index 000000000..622733b8f --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/cluster/scheduledbackup.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/postgresql.cnpg.io/scheduledbackup_v1.json +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: postgres17 +spec: + schedule: "@daily" + immediate: true + backupOwnerReference: self + cluster: + name: postgres17 diff --git a/kubernetes/main/apps/database/cloudnative-pg/ks.yaml b/kubernetes/main/apps/database/cloudnative-pg/ks.yaml new file mode 100644 index 000000000..c987243db --- /dev/null +++ b/kubernetes/main/apps/database/cloudnative-pg/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudnative-pg + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/database/cloudnative-pg/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudnative-pg-cluster + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg + path: ./kubernetes/main/apps/database/cloudnative-pg/cluster + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/database/kustomization.yaml b/kubernetes/main/apps/database/kustomization.yaml new file mode 100644 index 000000000..fd1bf3475 --- /dev/null +++ b/kubernetes/main/apps/database/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cloudnative-pg/ks.yaml + - ./redis/ks.yaml diff --git a/kubernetes/main/apps/database/namespace.yaml b/kubernetes/main/apps/database/namespace.yaml new file mode 100644 index 000000000..70a0cf101 --- /dev/null +++ b/kubernetes/main/apps/database/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: database + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: 
notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: database +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: database +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/database/redis/app/externalsecret.yaml b/kubernetes/main/apps/database/redis/app/externalsecret.yaml new file mode 100644 index 000000000..b7f7fb9be --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret redis-credentials +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + AUTHENTIK_REDIS__PASSWORD: "{{ .REDIS_PASSWORD }}" + dataFrom: + - extract: + key: secrets/redis diff --git a/kubernetes/main/apps/database/redis/app/helmrelease.yaml b/kubernetes/main/apps/database/redis/app/helmrelease.yaml new file mode 100644 index 000000000..fb100d36f --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/helmrelease.yaml @@ -0,0 +1,37 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: redis +spec: + interval: 30m + chart: + spec: + chart: redis + version: 20.3.0 + sourceRef: + kind: HelmRepository + name: bitnami + namespace: flux-system + maxHistory: 3 + install: + createNamespace: true + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + values: + master: + configuration: | + databases 32 + architecture: standalone + auth: + enabled: true + sentinel: false + existingSecret: redis-credentials + existingSecretPasswordKey: AUTHENTIK_REDIS__PASSWORD diff --git a/kubernetes/main/apps/database/redis/app/kustomization.yaml b/kubernetes/main/apps/database/redis/app/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/database/redis/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/database/redis/ks.yaml b/kubernetes/main/apps/database/redis/ks.yaml new file mode 100644 index 000000000..69ecd0283 --- /dev/null +++ b/kubernetes/main/apps/database/redis/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app redis + namespace: flux-system +spec: + targetNamespace: database + commonMetadata: + labels: + 
app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/database/redis/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml b/kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml new file mode 100644 index 000000000..03ae0ec48 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/app/helmrelease.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: external-secrets +spec: + interval: 30m + chart: + spec: + chart: external-secrets + version: 0.10.5 + sourceRef: + kind: HelmRepository + name: external-secrets + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + installCRDs: true + image: + repository: ghcr.io/external-secrets/external-secrets + webhook: + image: + repository: ghcr.io/external-secrets/external-secrets + serviceMonitor: + enabled: true + interval: 1m + certController: + image: + repository: ghcr.io/external-secrets/external-secrets + serviceMonitor: + enabled: true + interval: 1m + serviceMonitor: + enabled: true + interval: 1m diff --git a/kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml b/kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/external-secrets/external-secrets/ks.yaml b/kubernetes/main/apps/external-secrets/external-secrets/ks.yaml new file mode 100644 index 000000000..5cf51becc --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-secrets + namespace: flux-system +spec: + targetNamespace: external-secrets + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/external-secrets/external-secrets/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-secrets-stores + namespace: flux-system +spec: + targetNamespace: external-secrets + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets + path: ./kubernetes/main/apps/external-secrets/external-secrets/stores + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml 
b/kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml new file mode 100644 index 000000000..3c07d0a42 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/stores/clustersecretstore.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/clustersecretstore_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: vault-backend +spec: + provider: + vault: + server: https://vault.${PUBLIC_DOMAIN}:8200 + path: secrets + version: v2 + auth: + kubernetes: + mountPath: kubernetes + role: external-secrets-operator + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml b/kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml new file mode 100644 index 000000000..87f419341 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/external-secrets/stores/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./clustersecretstore.yaml + diff --git a/kubernetes/main/apps/external-secrets/kustomization.yaml b/kubernetes/main/apps/external-secrets/kustomization.yaml new file mode 100644 index 000000000..8b5a7e346 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./external-secrets/ks.yaml diff --git a/kubernetes/main/apps/external-secrets/namespace.yaml b/kubernetes/main/apps/external-secrets/namespace.yaml new file mode 100644 index 000000000..2a7689896 --- /dev/null +++ b/kubernetes/main/apps/external-secrets/namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: external-secrets + labels: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: external-secrets +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: external-secrets +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/flux-system/addons/app/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/kustomization.yaml new file mode 100644 index 000000000..feb053584 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization 
+resources: + - ./monitoring + - ./notifications + - ./webhooks diff --git a/kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml new file mode 100644 index 000000000..247c03744 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/monitoring/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./podmonitor.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml b/kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml new file mode 100644 index 000000000..bc68a6a45 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/monitoring/podmonitor.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/podmonitor_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: flux-system + namespace: flux-system + labels: + app.kubernetes.io/part-of: flux + app.kubernetes.io/component: monitoring +spec: + namespaceSelector: + matchNames: + - flux-system + selector: + matchExpressions: + - key: app + operator: In + values: + - helm-controller + - source-controller + - kustomize-controller + - notification-controller + podMetricsEndpoints: + - port: http-prom + relabelings: + # Ref: https://github.com/prometheus-operator/prometheus-operator/issues/4816 + - sourceLabels: [__meta_kubernetes_pod_phase] + action: keep + regex: Running diff --git a/kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml b/kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml new file mode 100644 index 000000000..4257e56de --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/monitoring/prometheusrule.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: flux-rules + namespace: flux-system +spec: + groups: + - name: flux.rules + rules: + - alert: FluxComponentAbsent + annotations: + summary: Flux component has disappeared from Prometheus target discovery. + expr: | + absent(up{job=~".*flux-system.*"} == 1) + for: 15m + labels: + severity: critical + - alert: FluxReconciliationFailure + annotations: + summary: >- + {{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation + has been failing for more than 15 minutes. 
+ expr: | + max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) + + + on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) + by (namespace, name, kind)) * 2 == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml new file mode 100644 index 000000000..a141a5b43 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/github/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: github-token +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: github-token-secret + template: + engineVersion: v2 + data: + token: "{{ .FLUX_GITHUB_TOKEN }}" + dataFrom: + - extract: + key: secrets/flux diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml new file mode 100644 index 000000000..c6052dbc2 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/github/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./notification.yaml diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml new file mode 100644 index 000000000..183dce81a --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/github/notification.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: github +spec: + type: github + address: https://github.com/Darkfella91/home-ops + secretRef: + name: github-token-secret +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: github +spec: + providerRef: + name: github + eventSeverity: info + eventSources: + - kind: Kustomization + name: "*" diff --git a/kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml new file mode 100644 index 000000000..08c1780f0 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/notifications/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml new file mode 100644 index 000000000..5364853ca --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/externalsecret.yaml @@ -0,0 +1,19 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: github-webhook-token +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: github-webhook-token-secret + template: + engineVersion: v2 + data: + token: "{{ .FLUX_GITHUB_WEBHOOK_TOKEN }}" + dataFrom: + - extract: + key: secrets/flux diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml new file mode 100644 index 000000000..9007eba9a --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/ingress.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: webhook-receiver + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} +spec: + ingressClassName: external + rules: + - host: flux-webhook.${PUBLIC_DOMAIN} + http: + paths: + - path: /hook/ + pathType: Prefix + backend: + service: + name: webhook-receiver + port: + number: 80 diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml new file mode 100644 index 000000000..58532a27c --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./ingress.yaml + - ./receiver.yaml diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml new file mode 100644 index 000000000..fd67703a2 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/github/receiver.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/receiver_v1.json +apiVersion: notification.toolkit.fluxcd.io/v1 +kind: Receiver +metadata: + name: home-ops +spec: + type: github + events: + - ping + - push + secretRef: + name: github-webhook-token-secret + resources: + - apiVersion: source.toolkit.fluxcd.io/v1 + kind: GitRepository + name: home-kubernetes + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster + namespace: flux-system + - apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + name: cluster-apps + namespace: flux-system diff --git a/kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml b/kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml new file mode 100644 index 000000000..08c1780f0 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/app/webhooks/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github diff --git a/kubernetes/main/apps/flux-system/addons/ks.yaml b/kubernetes/main/apps/flux-system/addons/ks.yaml new file mode 100644 index 000000000..8a2780483 --- /dev/null +++ b/kubernetes/main/apps/flux-system/addons/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: 
kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flux-addons + namespace: flux-system +spec: + targetNamespace: flux-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/flux-system/addons/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/flux-system/kustomization.yaml b/kubernetes/main/apps/flux-system/kustomization.yaml new file mode 100644 index 000000000..95df4db76 --- /dev/null +++ b/kubernetes/main/apps/flux-system/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./addons/ks.yaml diff --git a/kubernetes/main/apps/flux-system/namespace.yaml b/kubernetes/main/apps/flux-system/namespace.yaml new file mode 100644 index 000000000..38c6a2adf --- /dev/null +++ b/kubernetes/main/apps/flux-system/namespace.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: flux-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: flux-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: GitRepository + name: "*" + - kind: HelmRelease + name: "*" + - kind: HelmRepository + name: "*" + - kind: Kustomization + name: "*" + - kind: OCIRepository + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/keycloak/crds/kustomization.yaml b/kubernetes/main/apps/keycloak/crds/kustomization.yaml new file mode 100644 index 000000000..09dfd524a --- /dev/null +++ b/kubernetes/main/apps/keycloak/crds/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/26.0.5/kubernetes/keycloaks.k8s.keycloak.org-v1.yml + - https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/26.0.5/kubernetes/keycloakrealmimports.k8s.keycloak.org-v1.yml diff --git a/kubernetes/main/apps/keycloak/deployment/cr.yaml b/kubernetes/main/apps/keycloak/deployment/cr.yaml new file mode 100644 index 000000000..7482e6c36 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/cr.yaml @@ -0,0 +1,63 @@ +--- +apiVersion: k8s.keycloak.org/v2alpha1 +kind: Keycloak +metadata: + name: keycloak +spec: + db: + vendor: postgres + usernameSecret: + name: keycloak-initdb-secret + key: INIT_POSTGRES_USER + passwordSecret: + name: keycloak-initdb-secret + key: INIT_POSTGRES_PASS + host: 
postgres17-rw.database.svc.cluster.local + database: keycloak + port: 5432 + schema: public + instances: 1 + image: ghcr.io/darkfella91/keycloak-image:26.0.5@sha256:368e6a03ec617c62e30c5df925734f7eaa4d9bb78592e0e6aaffc7148775b6b1 + startOptimized: true + additionalOptions: + - name: https-protocols + value: "TLSv1.3" + bootstrapAdmin: + user: + secret: keycloak-admin-credentials + proxy: + headers: xforwarded + hostname: + hostname: https://auth.${PUBLIC_DOMAIN} + http: + httpEnabled: false + httpsPort: 443 + tlsSecret: keycloak-tls + ingress: + enabled: false + unsupported: + podTemplate: + spec: + containers: + - securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"] } + initContainers: + - name: init-db + image: "ghcr.io/onedr0p/postgres-init:16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f" + imagePullPolicy: IfNotPresent + envFrom: + - secretRef: + name: keycloak-initdb-secret + securityContext: + runAsUser: 10002 + runAsGroup: 10002 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + securityContext: + runAsNonRoot: true + seccompProfile: { type: RuntimeDefault } diff --git a/kubernetes/main/apps/keycloak/deployment/externalsecret.yaml b/kubernetes/main/apps/keycloak/deployment/externalsecret.yaml new file mode 100644 index 000000000..6d5e9c304 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/externalsecret.yaml @@ -0,0 +1,74 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret keycloak-initdb-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + INIT_POSTGRES_DBNAME: keycloak + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .KEYCLOAK_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .KEYCLOAK_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: keycloak-tls +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: keycloak-tls + creationPolicy: Orphan + template: + engineVersion: v2 + type: kubernetes.io/tls + metadata: + annotations: + cert-manager.io/alt-names: 'auth.${PUBLIC_DOMAIN},auth-admin.${PUBLIC_DOMAIN}' + cert-manager.io/certificate-name: keycloak-tls + cert-manager.io/ip-sans: "" + cert-manager.io/issuer-group: "" + cert-manager.io/issuer-kind: ClusterIssuer + cert-manager.io/issuer-name: zerossl-prod + cert-manager.io/uri-sans: "" + labels: + controller.cert-manager.io/fao: "true" + dataFrom: + - extract: + key: secrets/certificates/keycloak + decodingStrategy: Auto +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret keycloak-admin-credentials +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + username: "{{ .ADMIN_USERNAME }}" + password: 
"{{ .ADMIN_PASSWORD }}" + dataFrom: + - extract: + key: secrets/keycloak diff --git a/kubernetes/main/apps/keycloak/deployment/ingress.yaml b/kubernetes/main/apps/keycloak/deployment/ingress.yaml new file mode 100644 index 000000000..bc3bbd848 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/ingress.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: keycloak-ingress + namespace: idp + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/enable-modsecurity: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/modsecurity-snippet: | + Include /etc/nginx/modsecurity/modsecurity.conf + Include /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf + SecRuleUpdateTargetById 942100 "!ARGS:password" + SecRuleUpdateTargetById 932270 "!ARGS:code_verifier" + SecRuleUpdateTargetById 932130 "!ARGS:json.array_0.description" + SecPcreMatchLimit 5000 + SecPcreMatchLimitRecursion 5000 +spec: + ingressClassName: external + rules: + - host: auth.${PUBLIC_DOMAIN} + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: keycloak-service + port: + number: 443 diff --git a/kubernetes/main/apps/keycloak/deployment/kustomization.yaml b/kubernetes/main/apps/keycloak/deployment/kustomization.yaml new file mode 100644 index 000000000..1af901a21 --- /dev/null +++ b/kubernetes/main/apps/keycloak/deployment/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./ingress.yaml + - ./externalsecret.yaml + - ./cr.yaml diff --git a/kubernetes/main/apps/keycloak/ks.yaml b/kubernetes/main/apps/keycloak/ks.yaml new file mode 100644 index 000000000..c77ec8201 --- /dev/null +++ b/kubernetes/main/apps/keycloak/ks.yaml @@ -0,0 +1,66 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app keycloak-crds + namespace: flux-system +spec: + targetNamespace: idp + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/keycloak/crds + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app keycloak-operator + namespace: flux-system +spec: + dependsOn: + - name: keycloak-crds + targetNamespace: idp + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/keycloak/operator + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app keycloak-deployment + namespace: flux-system +spec: + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + - name: keycloak-operator + targetNamespace: idp + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/keycloak/deployment + prune: true + sourceRef: + kind: GitRepository + 
name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/keycloak/kustomization.yaml b/kubernetes/main/apps/keycloak/kustomization.yaml new file mode 100644 index 000000000..ad2040382 --- /dev/null +++ b/kubernetes/main/apps/keycloak/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + - ./ks.yaml diff --git a/kubernetes/main/apps/keycloak/namespace.yaml b/kubernetes/main/apps/keycloak/namespace.yaml new file mode 100644 index 000000000..8c452403a --- /dev/null +++ b/kubernetes/main/apps/keycloak/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: idp diff --git a/kubernetes/main/apps/keycloak/operator/kustomization.yaml b/kubernetes/main/apps/keycloak/operator/kustomization.yaml new file mode 100644 index 000000000..9018944b8 --- /dev/null +++ b/kubernetes/main/apps/keycloak/operator/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/26.0.5/kubernetes/kubernetes.yml diff --git a/kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml b/kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml new file mode 100644 index 000000000..4d57e7c72 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/helm-values.yaml @@ -0,0 +1,55 @@ +--- +autoDirectNodeRoutes: true +bandwidthManager: + enabled: true + bbr: true +bpf: + masquerade: true + tproxy: true +cgroup: + automount: + enabled: false + hostRoot: /sys/fs/cgroup +cluster: + id: 1 + name: main +endpointRoutes: + enabled: true +hubble: + enabled: false +ipam: + mode: kubernetes +ipv4NativeRoutingCIDR: 172.16.0.0/16 +k8sServiceHost: 127.0.0.1 +k8sServicePort: 7445 +kubeProxyReplacement: true +kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 +l2announcements: + enabled: true +loadBalancer: + algorithm: maglev + mode: dsr +localRedirectPolicy: true +operator: + replicas: 1 + rollOutPods: true +rollOutCiliumPods: true +routingMode: native +securityContext: + capabilities: + ciliumAgent: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + cleanCiliumState: + - NET_ADMIN + - SYS_ADMIN + - SYS_RESOURCE diff --git a/kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml new file mode 100644 index 000000000..683d616b8 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/helmrelease.yaml @@ -0,0 +1,78 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cilium +spec: + interval: 30m + chart: + spec: + chart: cilium + version: 1.16.3 + sourceRef: + kind: HelmRepository + name: cilium + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: cilium-helm-values + values: + hubble: + enabled: true + metrics: + enabled: + - dns:query + - drop + - tcp + - flow + - port-distribution + - icmp + - http + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + 
relay: + enabled: true + rollOutPods: true + prometheus: + serviceMonitor: + enabled: true + ui: + enabled: true + rollOutPods: true + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + className: internal + hosts: ["hubble.${PUBLIC_DOMAIN}"] + operator: + prometheus: + enabled: true + serviceMonitor: + enabled: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium + prometheus: + enabled: true + serviceMonitor: + enabled: true + trustCRDsExist: true + dashboards: + enabled: true + annotations: + grafana_folder: Cilium diff --git a/kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml b/kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml new file mode 100644 index 000000000..25781ef11 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: cilium-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml new file mode 100644 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml b/kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml new file mode 100644 index 000000000..62bc7b9d1 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/config/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./l2.yaml + - ./lrp.yaml diff --git a/kubernetes/main/apps/kube-system/cilium/config/l2.yaml b/kubernetes/main/apps/kube-system/cilium/config/l2.yaml new file mode 100644 index 000000000..e77b4cd29 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/config/l2.yaml @@ -0,0 +1,24 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cilium.io/ciliuml2announcementpolicy_v2alpha1.json +apiVersion: cilium.io/v2alpha1 +kind: CiliumL2AnnouncementPolicy +metadata: + name: l2-policy +spec: + loadBalancerIPs: true + interfaces: ["^eth[0-9]+"] + nodeSelector: + matchLabels: + kubernetes.io/os: linux +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cilium.io/ciliumloadbalancerippool_v2alpha1.json +apiVersion: cilium.io/v2alpha1 +kind: CiliumLoadBalancerIPPool +metadata: + name: l2-pool +spec: + allowFirstLastIPs: "Yes" + blocks: + - # Controller VIP: 192.168.91.21 + start: 192.168.91.91 + stop: 192.168.91.99 diff --git a/kubernetes/main/apps/kube-system/cilium/config/lrp.yaml b/kubernetes/main/apps/kube-system/cilium/config/lrp.yaml new file mode 100644 index 000000000..2de9e1dfb --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/config/lrp.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/cilium.io/ciliumlocalredirectpolicy_v2.json +apiVersion: cilium.io/v2 +kind: CiliumLocalRedirectPolicy +metadata: + name: kube-dns +spec: + 
redirectFrontend: + serviceMatcher: + serviceName: kube-dns + namespace: kube-system + redirectBackend: + localEndpointSelector: + matchLabels: + k8s-app: kube-dns + toPorts: + - port: "53" + name: dns + protocol: UDP + - port: "53" + name: dns-tcp + protocol: TCP diff --git a/kubernetes/main/apps/kube-system/cilium/ks.yaml b/kubernetes/main/apps/kube-system/cilium/ks.yaml new file mode 100644 index 000000000..e416f8e66 --- /dev/null +++ b/kubernetes/main/apps/kube-system/cilium/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/cilium/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cilium-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cilium + path: ./kubernetes/main/apps/kube-system/cilium/config + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml b/kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml new file mode 100644 index 000000000..0208d286e --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/helm-values.yaml @@ -0,0 +1,53 @@ +--- +fullnameOverride: coredns +replicaCount: 2 +rollingUpdate: + maxSurge: 1 +k8sAppLabelOverride: kube-dns +serviceAccount: + create: true +service: + name: kube-dns + clusterIP: 172.17.0.10 +servers: + - zones: + - zone: . + scheme: dns:// + use_tcp: true + port: 53 + plugins: + - name: errors + - name: health + configBlock: |- + lameduck 5s + - name: ready + - name: log + configBlock: |- + class error + - name: prometheus + parameters: 0.0.0.0:9153 + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + - name: forward + parameters: . 
/etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists +tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule diff --git a/kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml new file mode 100644 index 000000000..ce31f06de --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: coredns +spec: + interval: 30m + chart: + spec: + chart: coredns + version: 1.36.1 + sourceRef: + kind: HelmRepository + name: coredns + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: coredns-helm-values diff --git a/kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml b/kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml new file mode 100644 index 000000000..39444bbd4 --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: coredns-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml new file mode 100644 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/coredns/ks.yaml b/kubernetes/main/apps/kube-system/coredns/ks.yaml new file mode 100644 index 000000000..269f52ede --- /dev/null +++ b/kubernetes/main/apps/kube-system/coredns/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app coredns + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/coredns/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/descheduler/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/descheduler/app/helmrelease.yaml new file mode 100644 index 000000000..cdaf203da --- /dev/null +++ b/kubernetes/main/apps/kube-system/descheduler/app/helmrelease.yaml @@ -0,0 +1,64 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease 
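+# Note: the values below embed a DeschedulerPolicy (API v1alpha2). The Default
+# profile evicts pods that violate inter-pod anti-affinity, required node
+# affinity, node taints, or topology spread constraints; DefaultEvictor's
+# nodeFit check skips eviction when no other node could host the pod.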
+metadata: + name: descheduler +spec: + interval: 30m + chart: + spec: + chart: descheduler + version: 0.31.0 + sourceRef: + kind: HelmRepository + name: descheduler + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + replicas: 1 + kind: Deployment + deschedulerPolicyAPIVersion: descheduler/v1alpha2 + deschedulerPolicy: + profiles: + - name: Default + pluginConfig: + - name: DefaultEvictor + args: + evictFailedBarePods: true + evictLocalStoragePods: true + evictSystemCriticalPods: true + nodeFit: true + - name: RemovePodsViolatingInterPodAntiAffinity + - name: RemovePodsViolatingNodeAffinity + args: + nodeAffinityType: + - requiredDuringSchedulingIgnoredDuringExecution + - name: RemovePodsViolatingNodeTaints + - name: RemovePodsViolatingTopologySpreadConstraint + args: + constraints: + - DoNotSchedule + - ScheduleAnyway + plugins: + balance: + enabled: + - RemovePodsViolatingTopologySpreadConstraint + deschedule: + enabled: + - RemovePodsViolatingInterPodAntiAffinity + - RemovePodsViolatingNodeAffinity + - RemovePodsViolatingNodeTaints + service: + enabled: true + serviceMonitor: + enabled: true + leaderElection: + enabled: true diff --git a/kubernetes/main/apps/kube-system/descheduler/app/kustomization.yaml b/kubernetes/main/apps/kube-system/descheduler/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/descheduler/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/descheduler/ks.yaml b/kubernetes/main/apps/kube-system/descheduler/ks.yaml new file mode 100644 index 000000000..6bb42a6d9 --- /dev/null +++ b/kubernetes/main/apps/kube-system/descheduler/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app descheduler + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/descheduler/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/generic-device-plugin.yaml b/kubernetes/main/apps/kube-system/generic-device-plugin.yaml new file mode 100644 index 000000000..12adbd319 --- /dev/null +++ b/kubernetes/main/apps/kube-system/generic-device-plugin.yaml @@ -0,0 +1,60 @@ +# generic-device-plugin.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: generic-device-plugin + namespace: kube-system + labels: + app.kubernetes.io/name: generic-device-plugin +spec: + selector: + matchLabels: + app.kubernetes.io/name: generic-device-plugin + template: + metadata: + labels: + app.kubernetes.io/name: generic-device-plugin + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + containers: + - image: squat/generic-device-plugin + args: + - --device + - | + name: tun + groups: + - count: 1000 + paths: + - path: /dev/net/tun + name: generic-device-plugin + resources: + requests: + cpu: 50m + memory: 10Mi + limits: 
+ cpu: 50m + memory: 20Mi + ports: + - containerPort: 8080 + name: http + securityContext: + privileged: true + volumeMounts: + - name: device-plugin + mountPath: /var/lib/kubelet/device-plugins + - name: dev + mountPath: /dev + volumes: + - name: device-plugin + hostPath: + path: /var/lib/kubelet/device-plugins + - name: dev + hostPath: + path: /dev + updateStrategy: + type: RollingUpdate diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml new file mode 100644 index 000000000..c737caff1 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helm-values.yaml @@ -0,0 +1,4 @@ +--- +replicas: 1 +providerRegex: ^k8s-\d$ +bypassDnsResolution: true diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml new file mode 100644 index 000000000..b8146b0bb --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/helmrelease.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kubelet-csr-approver +spec: + interval: 30m + chart: + spec: + chart: kubelet-csr-approver + version: 1.2.3 + sourceRef: + kind: HelmRepository + name: postfinance + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: kubelet-csr-approver-helm-values + values: + metrics: + enable: true + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml new file mode 100644 index 000000000..30dddafcb --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomization.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: kubelet-csr-approver-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml new file mode 100644 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml b/kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml new file mode 100644 index 000000000..507320ba6 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kubelet-csr-approver/ks.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kubelet-csr-approver + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/kubelet-csr-approver/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git 
a/kubernetes/main/apps/kube-system/kustomization.yaml b/kubernetes/main/apps/kube-system/kustomization.yaml new file mode 100644 index 000000000..05725b621 --- /dev/null +++ b/kubernetes/main/apps/kube-system/kustomization.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./coredns/ks.yaml + - ./cilium/ks.yaml + - ./descheduler/ks.yaml + - ./kubelet-csr-approver/ks.yaml + - ./metrics-server/ks.yaml + - ./reloader/ks.yaml + - ./reflector/ks.yaml + - ./topolvm/ks.yaml + - ./generic-device-plugin.yaml + - ./nvidia-device-plugin/ks.yaml diff --git a/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helm-values.yaml b/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helm-values.yaml new file mode 100644 index 000000000..997fad46e --- /dev/null +++ b/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helm-values.yaml @@ -0,0 +1,85 @@ +image: + repository: darkfella/lvm-disk-watcher + pullPolicy: IfNotPresent + tag: v0.26.0@sha256:2d4164b4d869991e4e2bb154c8442192883f1c267931d25229c68054245a23a2 + +service: + main: + enabled: false + ports: + main: + enabled: false + +workload: + main: + type: DaemonSet + podSpec: + containers: + main: + env: + NODE_NAME: + fieldRef: + fieldPath: spec.nodeName + probes: + liveness: + enabled: false + readiness: + enabled: false + startup: + enabled: false + +configmap: + disk-config: + enabled: true + data: + disk-config: | + k8s-0: /dev/sdb + +persistence: + disk-config: + enabled: true + type: configmap + objectName: disk-config + defaultMode: "0777" + items: + - key: disk-config + path: disk-config + targetSelector: + main: + main: + subPath: disk-config + mountPath: /config/disk-config + readOnly: true + + dev: + enabled: true + type: hostPath + mountPath: /dev + hostPath: /dev + +securityContext: + container: + runAsUser: 0 + runAsGroup: 0 + allowPrivilegeEscalation: true + readOnlyRootFilesystem: false + privileged: true + capabilities: + drop: + - ALL + add: + [ + "SYS_ADMIN", + "MKNOD", + "SYS_RAWIO", + "SYS_PTRACE", + "NET_ADMIN", + "SYS_MODULE", + ] + pod: + fsGroup: 0 + fsGroupChangePolicy: OnRootMismatch + +portal: + open: + enabled: false diff --git a/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helmrelease.yaml new file mode 100644 index 000000000..ed860ea16 --- /dev/null +++ b/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: lvm-disk-watcher +spec: + interval: 30m + chart: + spec: + chart: lvm-disk-watcher + version: 2.0.4 + sourceRef: + kind: HelmRepository + name: truecharts + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: lvm-disk-watcher-helm-values diff --git a/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/kustomization.yaml b/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/kustomization.yaml new file mode 100644 index 000000000..a84d1ea07 --- /dev/null +++ b/kubernetes/main/apps/kube-system/lvm-disk-watcher/app/kustomization.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml +configMapGenerator: + - name: lvm-disk-watcher-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/lvm-disk-watcher/ks.yaml b/kubernetes/main/apps/kube-system/lvm-disk-watcher/ks.yaml new file mode 100644 index 000000000..4ad16ee83 --- /dev/null +++ b/kubernetes/main/apps/kube-system/lvm-disk-watcher/ks.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app lvm-disk-watcher + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/lvm-disk-watcher/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml new file mode 100644 index 000000000..78b197d29 --- /dev/null +++ b/kubernetes/main/apps/kube-system/metrics-server/app/helmrelease.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: metrics-server +spec: + interval: 30m + chart: + spec: + chart: metrics-server + version: 3.12.2 + sourceRef: + kind: HelmRepository + name: metrics-server + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + args: + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + metrics: + enabled: true + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml b/kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/metrics-server/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/metrics-server/ks.yaml b/kubernetes/main/apps/kube-system/metrics-server/ks.yaml new file mode 100644 index 000000000..c0bd19d06 --- /dev/null +++ b/kubernetes/main/apps/kube-system/metrics-server/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app metrics-server + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/metrics-server/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/namespace.yaml b/kubernetes/main/apps/kube-system/namespace.yaml new file mode 100644 index 000000000..c16492e39 --- /dev/null +++ b/kubernetes/main/apps/kube-system/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + 
name: kube-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: kube-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: kube-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml new file mode 100644 index 000000000..522bbf24f --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/helmrelease.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: nvidia-device-plugin +spec: + interval: 30m + chart: + spec: + chart: nvidia-device-plugin + version: 0.17.0 + sourceRef: + kind: HelmRepository + name: nvdp + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + gfd: + enabled: true + nfd: + enableNodeFeatureApi: true + worker: + config: + sources: + pci: + deviceClassWhitelist: + - "03" diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml new file mode 100644 index 000000000..5dd7baca7 --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/app/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml new file mode 100644 index 000000000..4d166dc9a --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./runtime.yaml diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml new file mode 100644 index 000000000..7ba6add19 --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/config/runtime.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: node.k8s.io/v1 +kind: RuntimeClass +metadata: + name: nvidia +handler: nvidia diff --git a/kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml b/kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml new file mode 100644 index 000000000..f23aa39c6 --- /dev/null +++ b/kubernetes/main/apps/kube-system/nvidia-device-plugin/ks.yaml 
@@ -0,0 +1,40 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nvdp-config + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/nvidia-device-plugin/config + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app nvdp + namespace: flux-system +spec: + dependsOn: + - name: nvdp-config + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/nvidia-device-plugin/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/reflector/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/reflector/app/helmrelease.yaml new file mode 100644 index 000000000..f58191490 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reflector/app/helmrelease.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: reflector +spec: + interval: 30m + chart: + spec: + chart: reflector + version: 7.1.288 + sourceRef: + kind: HelmRepository + name: emberstack + namespace: flux-system + interval: 12h + values: {} diff --git a/kubernetes/main/apps/kube-system/reflector/app/kustomization.yaml b/kubernetes/main/apps/kube-system/reflector/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reflector/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/reflector/ks.yaml b/kubernetes/main/apps/kube-system/reflector/ks.yaml new file mode 100644 index 000000000..f048fbd69 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reflector/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app reflector + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/reflector/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml new file mode 100644 index 000000000..567080cc8 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reloader/app/helmrelease.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: reloader +spec: + interval: 30m + chart: + spec: + chart: reloader + version: 1.1.0 + sourceRef: + kind: HelmRepository + name: stakater + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: 
rollback + retries: 3 + values: + fullnameOverride: reloader + reloader: + reloadOnCreate: true + reloadOnDelete: true + readOnlyRootFileSystem: true + podMonitor: + enabled: true + namespace: "{{ .Release.Namespace }}" diff --git a/kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml b/kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reloader/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kube-system/reloader/ks.yaml b/kubernetes/main/apps/kube-system/reloader/ks.yaml new file mode 100644 index 000000000..2f7c7ba89 --- /dev/null +++ b/kubernetes/main/apps/kube-system/reloader/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app reloader + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/reloader/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kube-system/topolvm/app/helm-values.yaml b/kubernetes/main/apps/kube-system/topolvm/app/helm-values.yaml new file mode 100644 index 000000000..3729eb2bb --- /dev/null +++ b/kubernetes/main/apps/kube-system/topolvm/app/helm-values.yaml @@ -0,0 +1,24 @@ +lvmd: + managed: true + deviceClasses: + - name: thin + volume-group: topolvm_vg + type: thin + default: true + spare-gb: 10 + thin-pool: + name: topolvm_thin + overprovision-ratio: 15.0 +controller: + replicaCount: 1 + updateStrategy: + type: Recreate +storageClasses: + - name: topolvm-provisioner-thin + storageClass: + fsType: xfs + isDefaultClass: true + volumeBindingMode: WaitForFirstConsumer + allowVolumeExpansion: true + additionalParameters: + "topolvm.io/device-class": "thin" diff --git a/kubernetes/main/apps/kube-system/topolvm/app/helmrelease.yaml b/kubernetes/main/apps/kube-system/topolvm/app/helmrelease.yaml new file mode 100644 index 000000000..b1079a809 --- /dev/null +++ b/kubernetes/main/apps/kube-system/topolvm/app/helmrelease.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: topolvm +spec: + interval: 30m + chart: + spec: + chart: topolvm + version: 15.5.0 + sourceRef: + kind: HelmRepository + name: topolvm + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + valuesFrom: + - kind: ConfigMap + name: topolvm-helm-values diff --git a/kubernetes/main/apps/kube-system/topolvm/app/kustomization.yaml b/kubernetes/main/apps/kube-system/topolvm/app/kustomization.yaml new file mode 100644 index 000000000..3ce450d78 --- /dev/null +++ b/kubernetes/main/apps/kube-system/topolvm/app/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml 
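+# Note: the generated ConfigMap name gets a content-hash suffix; the
+# nameReference rule in kustomizeconfig.yaml rewrites spec/valuesFrom/name in
+# the HelmRelease to match, so any change to helm-values.yaml produces a new
+# ConfigMap name and triggers a Helm upgrade.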
+configMapGenerator: + - name: topolvm-helm-values + files: + - values.yaml=./helm-values.yaml +configurations: + - kustomizeconfig.yaml diff --git a/kubernetes/main/apps/kube-system/topolvm/app/kustomizeconfig.yaml b/kubernetes/main/apps/kube-system/topolvm/app/kustomizeconfig.yaml new file mode 100644 index 000000000..58f92ba15 --- /dev/null +++ b/kubernetes/main/apps/kube-system/topolvm/app/kustomizeconfig.yaml @@ -0,0 +1,7 @@ +--- +nameReference: + - kind: ConfigMap + version: v1 + fieldSpecs: + - path: spec/valuesFrom/name + kind: HelmRelease diff --git a/kubernetes/main/apps/kube-system/topolvm/ks.yaml b/kubernetes/main/apps/kube-system/topolvm/ks.yaml new file mode 100644 index 000000000..1bea0ea72 --- /dev/null +++ b/kubernetes/main/apps/kube-system/topolvm/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app topolvm + namespace: flux-system +spec: + targetNamespace: kube-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kube-system/topolvm/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kyverno/kustomization.yaml b/kubernetes/main/apps/kyverno/kustomization.yaml new file mode 100644 index 000000000..10b5d06cd --- /dev/null +++ b/kubernetes/main/apps/kyverno/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./kyverno/ks.yaml diff --git a/kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml b/kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml new file mode 100644 index 000000000..a177c8e86 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/app/helmrelease.yaml @@ -0,0 +1,80 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app kyverno +spec: + interval: 30m + chart: + spec: + chart: kyverno + version: 3.3.3 + sourceRef: + kind: HelmRepository + name: kyverno + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + crds: + install: true + grafana: + enabled: true + admissionController: + replicas: 1 + rbac: + clusterRole: + extraResources: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - update + - delete + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: *app + app.kubernetes.io/component: admission-controller + serviceMonitor: + enabled: true + backgroundController: + rbac: + clusterRole: + extraResources: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - update + - patch + - delete + - get + - list + resources: + requests: + cpu: 100m + limits: + memory: 1Gi + serviceMonitor: + enabled: true + cleanupController: + serviceMonitor: + enabled: true + reportsController: + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml 
b/kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/kyverno/kyverno/ks.yaml b/kubernetes/main/apps/kyverno/kyverno/ks.yaml new file mode 100644 index 000000000..e4b225062 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kyverno + namespace: flux-system +spec: + targetNamespace: kyverno + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/kyverno/kyverno/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kyverno-policies + namespace: flux-system +spec: + targetNamespace: kyverno + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: kyverno + path: ./kubernetes/main/apps/kyverno/kyverno/policies + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml new file mode 100644 index 000000000..83d70af64 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/policies/dns-config.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kyverno.io/clusterpolicy_v1.json +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: add-ndots + annotations: + policies.kyverno.io/title: Add ndots + policies.kyverno.io/category: dnsConfig + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.6.0 + policies.kyverno.io/description: >- + The ndots value controls where DNS lookups are first performed in a cluster + and needs to be set to a lower value than the default of 5 in some cases. + This policy mutates all Pods to add the ndots option with a value of 1. 
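+# Illustration (standard resolver semantics, not part of the policy): with
+# ndots:1, a name containing at least one dot, e.g. "example.com", is queried
+# as an absolute name first; with the default ndots:5 the resolver walks the
+# search list first (example.com.<namespace>.svc.cluster.local, and so on),
+# generating needless upstream queries.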
+spec: + rules: + - name: add-ndots + match: + any: + - resources: + kinds: + - Pod + mutate: + patchStrategicMerge: + spec: + dnsConfig: + options: + - name: ndots + value: "1" diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml new file mode 100644 index 000000000..005f9da58 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/policies/hostpath-readonly.yaml @@ -0,0 +1,52 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kyverno.io/clusterpolicy_v1.json +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: ensure-readonly-hostpath + annotations: + policies.kyverno.io/title: Ensure Read Only hostPath + policies.kyverno.io/category: Other + policies.kyverno.io/severity: medium + policies.kyverno.io/minversion: 1.6.0 + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Pods which are allowed to mount hostPath volumes in read/write mode pose a security risk + even if confined to a "safe" file system on the host and may escape those confines (see + https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts). The only true way + to ensure safety is to enforce that all Pods mounting hostPath volumes do so in read only + mode. This policy checks all containers for any hostPath volumes and ensures they are + explicitly mounted in readOnly mode. +spec: + background: false + validationFailureAction: audit + rules: + - name: ensure-hostpaths-readonly + match: + any: + - resources: + kinds: + - Pod + preconditions: + all: + - key: "{{ request.operation || 'BACKGROUND' }}" + operator: AnyIn + value: + - CREATE + - UPDATE + validate: + message: All hostPath volumes must be mounted as readOnly. + foreach: + # Fetch all volumes in the Pod which are a hostPath. Store the names in an array. There could be multiple in a Pod so can't assume just one. + - list: "request.object.spec.volumes[?hostPath][]" + deny: + conditions: + # For every name found for a hostPath volume (stored as `{{element}}`), check all containers, initContainers, and ephemeralContainers which mount this volume and + # total up the number of them. Compare that to the ones with that same name which explicitly specify that `readOnly: true`. If these two + # counts aren't equal, deny the Pod because at least one is attempting to mount that hostPath in read/write mode. Note that the absence of + # the `readOnly: true` field implies read/write access. Therefore, every hostPath volume must explicitly specify that it should be mounted + # in readOnly mode, regardless of where that occurs in a Pod. 
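+              # Worked example (hypothetical names): volume "data" is a hostPath
+              # mounted by two containers, but only one mount sets readOnly: true.
+              # The two counts compared below are then 2 and 1, which are unequal,
+              # so the deny condition fires and the Pod is rejected.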
+ any: + - key: "{{ request.object.spec.[containers, initContainers, ephemeralContainers][].volumeMounts[?name == '{{element.name}}'][] | length(@) }}" + operator: NotEquals + value: "{{ request.object.spec.[containers, initContainers, ephemeralContainers][].volumeMounts[?name == '{{element.name}}' && readOnly] [] | length(@) }}" diff --git a/kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml b/kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml new file mode 100644 index 000000000..b03fe4603 --- /dev/null +++ b/kubernetes/main/apps/kyverno/kyverno/policies/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./dns-config.yaml + - ./hostpath-readonly.yaml diff --git a/kubernetes/main/apps/kyverno/namespace.yaml b/kubernetes/main/apps/kyverno/namespace.yaml new file mode 100644 index 000000000..2ca250077 --- /dev/null +++ b/kubernetes/main/apps/kyverno/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kyverno + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: kyverno +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: kyverno +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/media/autobrr/app/externalsecret.yaml b/kubernetes/main/apps/media/autobrr/app/externalsecret.yaml new file mode 100644 index 000000000..d6a1c7145 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/externalsecret.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret autobrr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + AUTOBRR__DATABASE_TYPE: postgres + AUTOBRR__POSTGRES_DATABASE: &dbName autobrr + AUTOBRR__POSTGRES_HOST: &dbHost postgres17-rw.database.svc.cluster.local + AUTOBRR__POSTGRES_USER: &dbUser "{{ .AUTOBRR_POSTGRES_USER }}" + AUTOBRR__POSTGRES_PASS: "{{ .AUTOBRR_POSTGRESS_ENCODED_PASS }}" + AUTOBRR__POSTGRES_PORT: "5432" + AUTOBRR__SESSION_SECRET: "{{ .AUTOBRR_SESSION_SECRET }}" + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: "{{ .AUTOBRR_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/autobrr + - extract: + key: secrets/cloudnative-pg diff --git a/kubernetes/main/apps/media/autobrr/app/gatus-config.yaml 
b/kubernetes/main/apps/media/autobrr/app/gatus-config.yaml new file mode 100644 index 000000000..9803f5cac --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/gatus-config.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${APP}-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "${APP}" + group: guarded + url: "https://${APP}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}" + interval: 10m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY] == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/autobrr/app/helmrelease.yaml b/kubernetes/main/apps/media/autobrr/app/helmrelease.yaml new file mode 100644 index 000000000..44b718578 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/helmrelease.yaml @@ -0,0 +1,109 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: autobrr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + autobrr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f + envFrom: &envFrom + - secretRef: + name: autobrr-secret + containers: + app: + image: + repository: ghcr.io/autobrr/autobrr + tag: v1.50.0@sha256:6a6f23570ab6b418318ab12bf2558712714e2f243cf18b139afa414f8417e97d + env: + AUTOBRR__CHECK_FOR_UPDATES: "false" + AUTOBRR__HOST: 0.0.0.0 + AUTOBRR__PORT: &port 80 + AUTOBRR__LOG_LEVEL: INFO + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /api/healthz/liveness + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: + enabled: true + custom: true + spec: + httpGet: + path: /api/healthz/readiness + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: autobrr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + tmp: + type: emptyDir diff --git a/kubernetes/main/apps/media/autobrr/app/kustomization.yaml b/kubernetes/main/apps/media/autobrr/app/kustomization.yaml new file mode 100644 index 000000000..dce2689e8 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/kustomization.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: 
Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./gatus-config.yaml +configMapGenerator: + - name: autobrr-loki-rules + files: + - autobrr.yaml=./resources/lokirule.yaml + options: + labels: + loki_rule: "true" +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml b/kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml new file mode 100644 index 000000000..e478ebbb8 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/app/resources/lokirule.yaml @@ -0,0 +1,14 @@ +--- +groups: + - name: autobrr + rules: + - alert: AutobrrNetworkUnhealthy + expr: | + sum by (app) (count_over_time({app="autobrr"} |~ "(?i)network unhealthy"[2m])) > 0 + for: 2m + labels: + severity: critical + category: logs + annotations: + app: "{{ $labels.app }}" + summary: "{{ $labels.app }} has an unhealthy network" diff --git a/kubernetes/main/apps/media/autobrr/ks.yaml b/kubernetes/main/apps/media/autobrr/ks.yaml new file mode 100644 index 000000000..347c40c72 --- /dev/null +++ b/kubernetes/main/apps/media/autobrr/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app autobrr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/autobrr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /api/healthz/liveness diff --git a/kubernetes/main/apps/media/bazarr/app/externalsecret.yaml b/kubernetes/main/apps/media/bazarr/app/externalsecret.yaml new file mode 100644 index 000000000..e6666f71f --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/externalsecret.yaml @@ -0,0 +1,358 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret bazarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + PLEX_TOKEN: "{{ .PLEX_TOKEN }}" + INIT_POSTGRES_USER: "{{ .BAZARR_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .BAZARR_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/plex + - extract: + key: secrets/cloudnative-pg +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret bazarr-config +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + config.yaml: | + addic7ed: + cookies: '' + password: '' + user_agent: '' + username: '' + vip: false + analytics: + enabled: false + anidb: + api_client: '' + api_client_ver: 1 + animetosho: + anidb_api_client: '' + anidb_api_client_ver: 1 + search_threshold: 6 + anticaptcha: + anti_captcha_key: '' + assrt: + token: '' + auth: + apikey: "{{ .BAZARR_API_KEY }}" + password: '' + type: null + username: '' + avistaz: + cookies: '' + user_agent: '' + backup: + day: 6 
+ folder: /config/backup + frequency: Weekly + hour: 3 + retention: 31 + betaseries: + token: '' + cinemaz: + cookies: '' + user_agent: '' + cors: + enabled: false + deathbycaptcha: + password: '' + username: '' + embeddedsubtitles: + fallback_lang: en + hi_fallback: true + included_codecs: [] + timeout: 600 + unknown_as_fallback: false + general: + adaptive_searching: true + adaptive_searching_delay: 3w + adaptive_searching_delta: 1w + anti_captcha_provider: null + auto_update: false + base_url: '' + branch: master + chmod: '0640' + chmod_enabled: false + days_to_upgrade_subs: 7 + debug: false + default_und_audio_lang: '' + default_und_embedded_subtitles_lang: '' + dont_notify_manual_actions: false + embedded_subs_show_desired: true + embedded_subtitles_parser: ffprobe + enabled_integrations: [] + enabled_providers: + - subssabbz + - subsunacs + - yavkanet + - embeddedsubtitles + - opensubtitlescom + flask_secret_key: "{{ .FLASK_SECRET_KEY }}" + hi_extension: hi + ignore_ass_subs: false + ignore_pgs_subs: false + ignore_vobsub_subs: false + ip: '*' + language_equals: [] + minimum_score: 90 + minimum_score_movie: 70 + movie_default_enabled: true + movie_default_profile: 1 + movie_tag_enabled: false + multithreading: true + page_size: 25 + parse_embedded_audio_track: false + path_mappings: [] + path_mappings_movie: [] + port: 6767 + postprocessing_cmd: /scripts/subcleaner.sh {{"{{"}}subtitles{{"}}"}} + postprocessing_threshold: 90 + postprocessing_threshold_movie: 70 + remove_profile_tags: [] + serie_default_enabled: true + serie_default_profile: 1 + serie_tag_enabled: false + single_language: false + skip_hashing: false + subfolder: current + subfolder_custom: '' + subzero_mods: remove_HI + theme: auto + upgrade_frequency: 12 + upgrade_manual: true + upgrade_subs: true + use_embedded_subs: false + use_postprocessing: true + use_postprocessing_threshold: false + use_postprocessing_threshold_movie: false + use_radarr: true + use_scenename: true + use_sonarr: true + utf8_encode: true + wanted_search_frequency: 6 + wanted_search_frequency_movie: 6 + hdbits: + passkey: '' + username: '' + jimaku: + api_key: '' + enable_ai_subs: false + enable_archives_download: false + enable_name_search_fallback: true + karagarga: + f_password: '' + f_username: '' + password: '' + username: '' + ktuvit: + email: '' + hashed_password: '' + legendasdivx: + password: '' + skip_wrong_fps: false + username: '' + legendasnet: + password: '' + username: '' + log: + exclude_filter: '' + ignore_case: false + include_filter: '' + use_regex: false + movie_scores: + audio_codec: 3 + edition: 1 + hash: 119 + hearing_impaired: 1 + release_group: 13 + resolution: 2 + source: 7 + streaming_service: 1 + title: 60 + video_codec: 2 + year: 30 + napisy24: + password: '' + username: '' + opensubtitles: + password: '' + skip_wrong_fps: false + ssl: false + timeout: 15 + use_tag_search: false + username: '' + vip: false + opensubtitlescom: + include_ai_translated: false + password: "{{ .OPENSUBTITLES_PASS }}" + use_hash: true + username: "{{ .OPENSUBTITLES_USER }}" + podnapisi: + verify_ssl: true + postgresql: + database: '' + enabled: false + host: localhost + password: '' + port: 5432 + username: '' + proxy: + exclude: + - localhost + - 127.0.0.1 + password: '' + port: '' + type: null + url: '' + username: '' + radarr: + apikey: "{{ .RADARR_API_KEY }}" + base_url: '' + defer_search_signalr: false + excluded_tags: [] + full_update: Daily + full_update_day: 6 + full_update_hour: 4 + http_timeout: 60 + ip: radarr.${PUBLIC_DOMAIN} + 
movies_sync: 60 + only_monitored: false + port: 443 + ssl: true + sync_only_monitored_movies: false + use_ffprobe_cache: true + series_scores: + audio_codec: 3 + episode: 30 + hash: 359 + hearing_impaired: 1 + release_group: 14 + resolution: 2 + season: 30 + series: 180 + source: 7 + streaming_service: 1 + video_codec: 2 + year: 90 + sonarr: + apikey: "{{ .SONARR_API_KEY }}" + base_url: '' + defer_search_signalr: false + exclude_season_zero: false + excluded_series_types: [] + excluded_tags: [] + full_update: Daily + full_update_day: 6 + full_update_hour: 4 + http_timeout: 60 + ip: sonarr.${PUBLIC_DOMAIN} + only_monitored: false + port: 443 + series_sync: 60 + ssl: true + sync_only_monitored_episodes: false + sync_only_monitored_series: false + use_ffprobe_cache: true + subdl: + api_key: '' + subf2m: + user_agent: '' + verify_ssl: true + subsync: + checker: + blacklisted_languages: [] + blacklisted_providers: [] + debug: false + force_audio: false + gss: true + max_offset_seconds: 60 + no_fix_framerate: false + subsync_movie_threshold: 99 + subsync_threshold: 99 + use_subsync: true + use_subsync_movie_threshold: true + use_subsync_threshold: true + titlovi: + password: '' + username: '' + titulky: + approved_only: false + password: '' + username: '' + whisperai: + endpoint: http://127.0.0.1:9000 + loglevel: INFO + response: 5 + timeout: 3600 + xsubs: + password: '' + username: '' + dataFrom: + - extract: + key: secrets/opensubtitles + - extract: + key: secrets/api-keys +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret bazarr-gatus-ep +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + metadata: + labels: + gatus.io/enabled: "true" + engineVersion: v2 + data: + config.yaml: | + endpoints: + - name: "bazarr" + group: guarded + url: "https://bazarr.${PUBLIC_DOMAIN}/api/system/status" + interval: 5m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "has([BODY].data.bazarr_version) == true" + alerts: + - type: pushover + headers: + Accept: application/json + X-API-KEY: {{ .BAZARR_API_KEY }} + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/bazarr/app/helmrelease.yaml b/kubernetes/main/apps/media/bazarr/app/helmrelease.yaml new file mode 100644 index 000000000..12de9ea63 --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/helmrelease.yaml @@ -0,0 +1,193 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: bazarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + bazarr: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f + securityContext: &securityContext + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + 
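+ # This block is anchored as &securityContext above and re-used verbatim (*securityContext) by the app and subcleaner containers further down.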
capabilities: { drop: ["ALL"] } + env: + INIT_POSTGRES_HOST: &dbHost postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_PORT: &dbPort 5432 + INIT_POSTGRES_DBNAME: &dbName bazarr + INIT_POSTGRES_SUPER_PASS: + valueFrom: + secretKeyRef: + name: &secret bazarr-secret + key: INIT_POSTGRES_SUPER_PASS + INIT_POSTGRES_USER: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_USER + INIT_POSTGRES_PASS: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_PASS + containers: + app: + image: + repository: ghcr.io/darkfella91/bazarr + tag: 1.4.5@sha256:1e3ca601b51bb061e342ab8da9ee79062a51f254b50fcc43756299a29647d3aa + env: + TZ: Europe/Sofia + POSTGRES_ENABLED: "true" + POSTGRES_HOST: *dbHost + POSTGRES_PORT: *dbPort + POSTGRES_DATABASE: bazarr + POSTGRES_USERNAME: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_USER + POSTGRES_PASSWORD: + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_PASS + PLEX_TOKEN: + valueFrom: + secretKeyRef: + name: *secret + key: PLEX_TOKEN + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: &port 6767 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: *securityContext + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "mkdir -p /config/config && cp /secret/config.yaml /config/config/config.yaml"] + resources: + requests: + cpu: 10m + limits: + memory: 1Gi + subcleaner: + image: + repository: registry.k8s.io/git-sync/git-sync + tag: v4.3.0@sha256:5813a7da0ccd58f6dfb9d5e48480e2877355e6bb3d7d81c8908eb1adc3a23b6e + env: + GITSYNC_REPO: https://github.com/KBlixt/subcleaner + GITSYNC_REF: master + GITSYNC_PERIOD: 24h + GITSYNC_ROOT: /add-ons + resources: + requests: + cpu: 10m + limits: + memory: 128Mi + securityContext: *securityContext + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: [10000] + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: bazarr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "^/api(/|$)") { + return 200; + } + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + add-ons: + type: emptyDir + config: + type: emptyDir + secret-file: + type: secret + name: bazarr-config + globalMounts: + - readOnly: true + path: /secret/config.yaml + subPath: config.yaml + media: + type: nfs + server: nas.${PUBLIC_DOMAIN} + path: /mnt/exos20/data + globalMounts: + - path: /data/media + subPath: media + scripts: + type: configMap + name: bazarr-scripts + defaultMode: 0775 + globalMounts: + - readOnly: true + tmp: + type: emptyDir + + test: + type: configMap + name: connectionpool + advancedMounts: + bazarr: + app: + - readOnly: true + path: /app/bin/libs/urllib3/connectionpool.py + subPath: connectionpool.py diff --git a/kubernetes/main/apps/media/bazarr/app/kustomization.yaml b/kubernetes/main/apps/media/bazarr/app/kustomization.yaml new file mode 100644 index 000000000..cc82034b7 --- /dev/null +++
b/kubernetes/main/apps/media/bazarr/app/kustomization.yaml @@ -0,0 +1,18 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: bazarr-scripts + files: + - subcleaner.sh=./resources/subcleaner.sh + - name: connectionpool + files: + - connectionpool.py=./resources/connectionpool.py +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled diff --git a/kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py b/kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py new file mode 100644 index 000000000..6ab8ecdfb --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/resources/connectionpool.py @@ -0,0 +1,1182 @@ +from __future__ import annotations + +import errno +import logging +import queue +import sys +import typing +import warnings +import weakref +import ssl +from socket import timeout as SocketTimeout +from types import TracebackType + +from ._base_connection import _TYPE_BODY +from ._collections import HTTPHeaderDict +from ._request_methods import RequestMethods +from .connection import ( + BaseSSLError, + BrokenPipeError, + DummyConnection, + HTTPConnection, + HTTPException, + HTTPSConnection, + ProxyConfig, + _wrap_proxy_error, +) +from .connection import port_by_scheme as port_by_scheme +from .exceptions import ( + ClosedPoolError, + EmptyPoolError, + FullPoolError, + HostChangedError, + InsecureRequestWarning, + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, + ProxyError, + ReadTimeoutError, + SSLError, + TimeoutError, +) +from .response import BaseHTTPResponse +from .util.connection import is_connection_dropped +from .util.proxy import connection_requires_http_tunnel +from .util.request import _TYPE_BODY_POSITION, set_file_position +from .util.retry import Retry +from .util.ssl_match_hostname import CertificateError +from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout +from .util.url import Url, _encode_target +from .util.url import _normalize_host as normalize_host +from .util.url import parse_url +from .util.util import to_str + +if typing.TYPE_CHECKING: + import ssl + from typing import Literal + + from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection + +log = logging.getLogger(__name__) + +_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] + +_SelfT = typing.TypeVar("_SelfT") + + +# Pool objects +class ConnectionPool: + """ + Base class for all connection pools, such as + :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. + + .. note:: + ConnectionPool.urlopen() does not normalize or percent-encode target URIs + which is useful if your target server doesn't support percent-encoded + target URIs. + """ + + scheme: str | None = None + QueueCls = queue.LifoQueue + + def __init__(self, host: str, port: int | None = None) -> None: + if not host: + raise LocationValueError("No host specified.") + + self.host = _normalize_host(host, scheme=self.scheme) + self.port = port + + # This property uses 'normalize_host()' (not '_normalize_host()') + # to avoid removing square braces around IPv6 addresses. + # This value is sent to `HTTPConnection.set_tunnel()` if called + # because square braces are required for HTTP CONNECT tunneling. 
+ self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() + + def __str__(self) -> str: + return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" + + def __enter__(self: _SelfT) -> _SelfT: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> Literal[False]: + self.close() + # Return False to re-raise any potential exceptions + return False + + def close(self) -> None: + """ + Close all pooled connections and disable the pool. + """ + + +# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 +_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} + + +class HTTPConnectionPool(ConnectionPool, RequestMethods): + """ + Thread-safe connection pool for one host. + + :param host: + Host used for this HTTP Connection (e.g. "localhost"), passed into + :class:`http.client.HTTPConnection`. + + :param port: + Port used for this HTTP Connection (None is equivalent to 80), passed + into :class:`http.client.HTTPConnection`. + + :param timeout: + Socket timeout in seconds for each individual connection. This can + be a float or integer, which sets the timeout for the HTTP request, + or an instance of :class:`urllib3.util.Timeout` which gives you more + fine-grained control over request timeouts. After the constructor has + been parsed, this is always a `urllib3.util.Timeout` object. + + :param maxsize: + Number of connections to save that can be reused. More than 1 is useful + in multithreaded situations. If ``block`` is set to False, more + connections will be created but they will not be saved once they've + been used. + + :param block: + If set to True, no more than ``maxsize`` connections will be used at + a time. When no free connections are available, the call will block + until a connection has been released. This is a useful side effect for + particular multithreaded situations where one does not want to use more + than maxsize connections per host to prevent flooding. + + :param headers: + Headers to include with all requests, unless other headers are given + explicitly. + + :param retries: + Retry configuration to use by default with requests in this pool. + + :param _proxy: + Parsed proxy URL, should not be used directly, instead, see + :class:`urllib3.ProxyManager` + + :param _proxy_headers: + A dictionary with proxy headers, should not be used directly, + instead, see :class:`urllib3.ProxyManager` + + :param \\**conn_kw: + Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, + :class:`urllib3.connection.HTTPSConnection` instances. 
+ """ + + scheme = "http" + ConnectionCls: ( + type[BaseHTTPConnection] | type[BaseHTTPSConnection] + ) = HTTPConnection + + def __init__( + self, + host: str, + port: int | None = None, + timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, + maxsize: int = 1, + block: bool = False, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + _proxy: Url | None = None, + _proxy_headers: typing.Mapping[str, str] | None = None, + _proxy_config: ProxyConfig | None = None, + **conn_kw: typing.Any, + ): + ConnectionPool.__init__(self, host, port) + RequestMethods.__init__(self, headers) + + if not isinstance(timeout, Timeout): + timeout = Timeout.from_float(timeout) + + if retries is None: + retries = Retry.DEFAULT + + self.timeout = timeout + self.retries = retries + + self.pool: queue.LifoQueue[typing.Any] | None = self.QueueCls(maxsize) + self.block = block + + self.proxy = _proxy + self.proxy_headers = _proxy_headers or {} + self.proxy_config = _proxy_config + + # Fill the queue up so that doing get() on it will block properly + for _ in range(maxsize): + self.pool.put(None) + + # These are mostly for testing and debugging purposes. + self.num_connections = 0 + self.num_requests = 0 + self.conn_kw = conn_kw + + if self.proxy: + # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. + # We cannot know if the user has added default socket options, so we cannot replace the + # list. + self.conn_kw.setdefault("socket_options", []) + + self.conn_kw["proxy"] = self.proxy + self.conn_kw["proxy_config"] = self.proxy_config + + # Do not pass 'self' as callback to 'finalize'. + # Then the 'finalize' would keep an endless living (leak) to self. + # By just passing a reference to the pool allows the garbage collector + # to free self if nobody else has a reference to it. + pool = self.pool + + # Close all the HTTPConnections in the pool before the + # HTTPConnectionPool object is garbage collected. + weakref.finalize(self, _close_pool_connections, pool) + + def _new_conn(self) -> BaseHTTPConnection: + """ + Return a fresh :class:`HTTPConnection`. + """ + self.num_connections += 1 + log.debug( + "Starting new HTTP connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "80", + ) + + conn = self.ConnectionCls( + host=self.host, + port=self.port, + timeout=self.timeout.connect_timeout, + **self.conn_kw, + ) + return conn + + def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection: + """ + Get a connection. Will return a pooled connection if one is available. + + If no connections are available and :prop:`.block` is ``False``, then a + fresh connection is returned. + + :param timeout: + Seconds to wait before giving up and raising + :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and + :prop:`.block` is ``True``. 
+ """ + conn = None + + if self.pool is None: + raise ClosedPoolError(self, "Pool is closed.") + + try: + conn = self.pool.get(block=self.block, timeout=timeout) + + except AttributeError: # self.pool is None + raise ClosedPoolError(self, "Pool is closed.") from None # Defensive: + + except queue.Empty: + if self.block: + raise EmptyPoolError( + self, + "Pool is empty and a new connection can't be opened due to blocking mode.", + ) from None + pass # Oh well, we'll create a new connection then + + # If this is a persistent connection, check if it got disconnected + if conn and is_connection_dropped(conn): + log.debug("Resetting dropped connection: %s", self.host) + conn.close() + + return conn or self._new_conn() + + def _put_conn(self, conn: BaseHTTPConnection | None) -> None: + """ + Put a connection back into the pool. + + :param conn: + Connection object for the current host and port as returned by + :meth:`._new_conn` or :meth:`._get_conn`. + + If the pool is already full, the connection is closed and discarded + because we exceeded maxsize. If connections are discarded frequently, + then maxsize should be increased. + + If the pool is closed, then the connection will be closed and discarded. + """ + if self.pool is not None: + try: + self.pool.put(conn, block=False) + return # Everything is dandy, done. + except AttributeError: + # self.pool is None. + pass + except queue.Full: + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + if self.block: + # This should never happen if you got the conn from self._get_conn + raise FullPoolError( + self, + "Pool reached maximum size and no more connections are allowed.", + ) from None + + log.warning( + "Connection pool is full, discarding connection: %s. Connection pool size: %s", + self.host, + self.pool.qsize(), + ) + + # Connection never got put back into the pool, close it. + if conn: + conn.close() + + def _validate_conn(self, conn: BaseHTTPConnection) -> None: + """ + Called right before a request is made, after the socket is created. + """ + + def _prepare_proxy(self, conn: BaseHTTPConnection) -> None: + # Nothing to do for HTTP connections. + pass + + def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout: + """Helper that always returns a :class:`urllib3.util.Timeout`""" + if timeout is _DEFAULT_TIMEOUT: + return self.timeout.clone() + + if isinstance(timeout, Timeout): + return timeout.clone() + else: + # User passed us an int/float. This is for backwards compatibility, + # can be removed later + return Timeout.from_float(timeout) + + def _raise_timeout( + self, + err: BaseSSLError | OSError | SocketTimeout, + url: str, + timeout_value: _TYPE_TIMEOUT | None, + ) -> None: + """Is the error actually a timeout? Will raise a ReadTimeout or pass""" + + if isinstance(err, SocketTimeout): + raise ReadTimeoutError( + self, url, f"Read timed out. (read timeout={timeout_value})" + ) from err + + # See the above comment about EAGAIN in Python 3. + if hasattr(err, "errno") and err.errno in _blocking_errnos: + raise ReadTimeoutError( + self, url, f"Read timed out. 
(read timeout={timeout_value})" + ) from err + + def _make_request( + self, + conn: BaseHTTPConnection, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | None = None, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + chunked: bool = False, + response_conn: BaseHTTPConnection | None = None, + preload_content: bool = True, + decode_content: bool = True, + enforce_content_length: bool = True, + ) -> BaseHTTPResponse: + """ + Perform a request on a given urllib connection object taken from our + pool. + + :param conn: + a connection from one of our connection pools + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + Pass ``None`` to retry until you receive a response. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param response_conn: + Set this to ``None`` if you will handle releasing the connection or + set the connection to have the response release it. + + :param preload_content: + If True, the response's body will be preloaded during construction. + + :param decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param enforce_content_length: + Enforce content length checking. Body returned by server must match + value of Content-Length header, if present. Otherwise, raise error. + """ + self.num_requests += 1 + + timeout_obj = self._get_timeout(timeout) + timeout_obj.start_connect() + conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout) + + try: + # Trigger any extra validation we need to do. + try: + self._validate_conn(conn) + except (SocketTimeout, BaseSSLError) as e: + self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) + raise + + # _validate_conn() starts the connection to an HTTPS proxy + # so we need to wrap errors with 'ProxyError' here too. 
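+ # The except block below normalises low-level failures: SSL/certificate errors are re-wrapped as urllib3's SSLError, and anything raised before the HTTPS-proxy tunnel was fully established is converted by _wrap_proxy_error() so callers see a ProxyError.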
+ except ( + OSError, + NewConnectionError, + TimeoutError, + BaseSSLError, + CertificateError, + SSLError, + ) as e: + new_e: Exception = e + if isinstance(e, (BaseSSLError, CertificateError)): + new_e = SSLError(e) + # If the connection didn't successfully connect to it's proxy + # then there + if isinstance( + new_e, (OSError, NewConnectionError, TimeoutError, SSLError) + ) and (conn and conn.proxy and not conn.has_connected_to_proxy): + new_e = _wrap_proxy_error(new_e, conn.proxy.scheme) + raise new_e + + # conn.request() calls http.client.*.request, not the method in + # urllib3.request. It also calls makefile (recv) on the socket. + try: + conn.request( + method, + url, + body=body, + headers=headers, + chunked=chunked, + preload_content=preload_content, + decode_content=decode_content, + enforce_content_length=enforce_content_length, + ) + + # We are swallowing BrokenPipeError (errno.EPIPE) since the server is + # legitimately able to close the connection after sending a valid response. + # With this behaviour, the received response is still readable. + except BrokenPipeError: + pass + except OSError as e: + # MacOS/Linux + # EPROTOTYPE and ECONNRESET are needed on macOS + # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/ + # Condition changed later to emit ECONNRESET instead of only EPROTOTYPE. + if e.errno != errno.EPROTOTYPE and e.errno != errno.ECONNRESET: + raise + + # Reset the timeout for the recv() on the socket + read_timeout = timeout_obj.read_timeout + + if not conn.is_closed: + # In Python 3 socket.py will catch EAGAIN and return None when you + # try and read into the file pointer created by http.client, which + # instead raises a BadStatusLine exception. Instead of catching + # the exception and assuming all BadStatusLine exceptions are read + # timeouts, check for a zero timeout before making the request. + if read_timeout == 0: + raise ReadTimeoutError( + self, url, f"Read timed out. (read timeout={read_timeout})" + ) + conn.timeout = read_timeout + + # Receive the response from the server + try: + response = conn.getresponse() + except (BaseSSLError, OSError) as e: + self._raise_timeout(err=e, url=url, timeout_value=read_timeout) + raise + + # Set properties that are used by the pooling layer. + response.retries = retries + response._connection = response_conn # type: ignore[attr-defined] + response._pool = self # type: ignore[attr-defined] + + # emscripten connection doesn't have _http_vsn_str + http_version = getattr(conn, "_http_vsn_str", "HTTP/?") + log.debug( + '%s://%s:%s "%s %s %s" %s %s', + self.scheme, + self.host, + self.port, + method, + url, + # HTTP version + http_version, + response.status, + response.length_remaining, + ) + + return response + + def close(self) -> None: + """ + Close all pooled connections and disable the pool. + """ + if self.pool is None: + return + # Disable access to the pool + old_pool, self.pool = self.pool, None + + # Close all the HTTPConnections in the pool. + _close_pool_connections(old_pool) + + def is_same_host(self, url: str) -> bool: + """ + Check if the given ``url`` is a member of the same host as this + connection pool. + """ + if url.startswith("/"): + return True + + # TODO: Add optional support for socket.gethostbyname checking. 
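+ # Comparison happens on the (scheme, host, port) triple with default ports + # normalised; e.g. for pool = HTTPConnectionPool("example.com"): + # pool.is_same_host("/index.html") -> True (relative URL) + # pool.is_same_host("http://example.com/") -> True (port defaulted) + # pool.is_same_host("http://example.com:8080/") -> False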
+ scheme, _, host, port, *_ = parse_url(url) + scheme = scheme or "http" + if host is not None: + host = _normalize_host(host, scheme=scheme) + + # Use explicit default port for comparison when none is given + if self.port and not port: + port = port_by_scheme.get(scheme) + elif not self.port and port == port_by_scheme.get(scheme): + port = None + + return (scheme, host, port) == (self.scheme, self.host, self.port) + + def urlopen( # type: ignore[override] + self, + method: str, + url: str, + body: _TYPE_BODY | None = None, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + redirect: bool = True, + assert_same_host: bool = True, + timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, + pool_timeout: int | None = None, + release_conn: bool | None = None, + chunked: bool = False, + body_pos: _TYPE_BODY_POSITION | None = None, + preload_content: bool = True, + decode_content: bool = True, + **response_kw: typing.Any, + ) -> BaseHTTPResponse: + """ + Get a connection from the pool and perform an HTTP request. This is the + lowest level call for making a request, so you'll need to specify all + the raw details. + + .. note:: + + More commonly, it's appropriate to use a convenience method + such as :meth:`request`. + + .. note:: + + `release_conn` will only behave as expected if + `preload_content=False` because we want to make + `preload_content=False` the default behaviour someday soon without + breaking backwards compatibility. + + :param method: + HTTP request method (such as GET, POST, PUT, etc.) + + :param url: + The URL to perform the request on. + + :param body: + Data to send in the request body, either :class:`str`, :class:`bytes`, + an iterable of :class:`str`/:class:`bytes`, or a file-like object. + + :param headers: + Dictionary of custom headers to send, such as User-Agent, + If-None-Match, etc. If None, pool headers are used. If provided, + these headers completely replace any pool-specific headers. + + :param retries: + Configure the number of retries to allow before raising a + :class:`~urllib3.exceptions.MaxRetryError` exception. + + If ``None`` (default) will retry 3 times, see ``Retry.DEFAULT``. Pass a + :class:`~urllib3.util.retry.Retry` object for fine-grained control + over different types of retries. + Pass an integer number to retry connection errors that many times, + but no other types of errors. Pass zero to never retry. + + If ``False``, then retries are disabled and any exception is raised + immediately. Also, instead of raising a MaxRetryError on redirects, + the redirect response will be returned. + + :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. + + :param redirect: + If True, automatically handle redirects (status codes 301, 302, + 303, 307, 308). Each redirect counts as a retry. Disabling retries + will disable redirect, too. + + :param assert_same_host: + If ``True``, will make sure that the host of the pool requests is + consistent else will raise HostChangedError. When ``False``, you can + use the pool on an HTTP proxy and request foreign hosts. + + :param timeout: + If specified, overrides the default timeout for this one + request. It may be a float (in seconds) or an instance of + :class:`urllib3.util.Timeout`. + + :param pool_timeout: + If set and the pool is set to block=True, then this method will + block for ``pool_timeout`` seconds and raise EmptyPoolError if no + connection is available within the time period. 
+ + :param bool preload_content: + If True, the response's body will be preloaded into memory. + + :param bool decode_content: + If True, will attempt to decode the body based on the + 'content-encoding' header. + + :param release_conn: + If False, then the urlopen call will not release the connection + back into the pool once a response is received (but will release if + you read the entire contents of the response such as when + `preload_content=True`). This is useful if you're not preloading + the response's content immediately. You will need to call + ``r.release_conn()`` on the response ``r`` to return the connection + back into the pool. If None, it takes the value of ``preload_content`` + which defaults to ``True``. + + :param bool chunked: + If True, urllib3 will send the body using chunked transfer + encoding. Otherwise, urllib3 will send the body using the standard + content-length form. Defaults to False. + + :param int body_pos: + Position to seek to in file-like body in the event of a retry or + redirect. Typically this won't need to be set because urllib3 will + auto-populate the value when needed. + """ + parsed_url = parse_url(url) + destination_scheme = parsed_url.scheme + + if headers is None: + headers = self.headers + + if not isinstance(retries, Retry): + retries = Retry.from_int(retries, redirect=redirect, default=self.retries) + + if release_conn is None: + release_conn = preload_content + + # Check host + if assert_same_host and not self.is_same_host(url): + raise HostChangedError(self, url, retries) + + # Ensure that the URL we're connecting to is properly encoded + if url.startswith("/"): + url = to_str(_encode_target(url)) + else: + url = to_str(parsed_url.url) + + conn = None + + # Track whether `conn` needs to be released before + # returning/raising/recursing. Update this variable if necessary, and + # leave `release_conn` constant throughout the function. That way, if + # the function recurses, the original value of `release_conn` will be + # passed down into the recursive call, and its value will be respected. + # + # See issue #651 [1] for details. + # + # [1] + release_this_conn = release_conn + + http_tunnel_required = connection_requires_http_tunnel( + self.proxy, self.proxy_config, destination_scheme + ) + + # Merge the proxy headers. Only done when not using HTTP CONNECT. We + # have to copy the headers dict so we can safely change it without those + # changes being reflected in anyone else's copy. + if not http_tunnel_required: + headers = headers.copy() # type: ignore[attr-defined] + headers.update(self.proxy_headers) # type: ignore[union-attr] + + # Must keep the exception bound to a separate variable or else Python 3 + # complains about UnboundLocalError. + err = None + + # Keep track of whether we cleanly exited the except block. This + # ensures we do proper cleanup in finally. + clean_exit = False + + # Rewind body position, if needed. Record current position + # for future rewinds in the event of a redirect/retry. + body_pos = set_file_position(body, body_pos) + + try: + # Request a connection from the queue. + timeout_obj = self._get_timeout(timeout) + conn = self._get_conn(timeout=pool_timeout) + + conn.timeout = timeout_obj.connect_timeout # type: ignore[assignment] + + # Is this a closed/new connection that requires CONNECT tunnelling? 
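+ # Only tunnelled destinations need this step: connection_requires_http_tunnel() decided above whether the target must be reached via HTTP CONNECT (typically an https origin behind a proxy) or can be forwarded as a plain absolute-URI request.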
+ if self.proxy is not None and http_tunnel_required and conn.is_closed: + try: + self._prepare_proxy(conn) + except (BaseSSLError, OSError, SocketTimeout) as e: + self._raise_timeout( + err=e, url=self.proxy.url, timeout_value=conn.timeout + ) + raise + + # If we're going to release the connection in ``finally:``, then + # the response doesn't need to know about the connection. Otherwise + # it will also try to release it and we'll have a double-release + # mess. + response_conn = conn if not release_conn else None + + # Make the request on the HTTPConnection object + response = self._make_request( + conn, + method, + url, + timeout=timeout_obj, + body=body, + headers=headers, + chunked=chunked, + retries=retries, + response_conn=response_conn, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Everything went great! + clean_exit = True + + except EmptyPoolError: + # Didn't get a connection from the pool, no need to clean up + clean_exit = True + release_this_conn = False + raise + + except ( + TimeoutError, + HTTPException, + OSError, + ProtocolError, + BaseSSLError, + SSLError, + CertificateError, + ProxyError, + ) as e: + # Discard the connection for these exceptions. It will be + # replaced during the next _get_conn() call. + clean_exit = False + new_e: Exception = e + if isinstance(e, (BaseSSLError, CertificateError)): + new_e = SSLError(e) + if isinstance( + new_e, + ( + OSError, + NewConnectionError, + TimeoutError, + SSLError, + HTTPException, + ), + ) and (conn and conn.proxy and not conn.has_connected_to_proxy): + new_e = _wrap_proxy_error(new_e, conn.proxy.scheme) + elif isinstance(new_e, (OSError, HTTPException)): + new_e = ProtocolError("Connection aborted.", new_e) + + retries = retries.increment( + method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2] + ) + retries.sleep() + + # Keep track of the error for the retry warning. + err = e + + finally: + if not clean_exit: + # We hit some kind of exception, handled or otherwise. We need + # to throw the connection away unless explicitly told not to. + # Close the connection, set the variable to None, and make sure + # we put the None back in the pool to avoid leaking it. + if conn: + conn.close() + conn = None + release_this_conn = True + + if release_this_conn: + # Put the connection back to be reused. If the connection is + # expired then it will be None, which will get replaced with a + # fresh connection during _get_conn. + self._put_conn(conn) + + if not conn: + # Try again + log.warning( + "Retrying (%r) after connection broken by '%r': %s", retries, err, url + ) + return self.urlopen( + method, + url, + body, + headers, + retries, + redirect, + assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Handle redirect? + redirect_location = redirect and response.get_redirect_location() + if redirect_location: + if response.status == 303: + # Change the method according to RFC 9110, Section 15.4.4. + method = "GET" + # And lose the body not to transfer anything sensitive. 
+ body = None + headers = HTTPHeaderDict(headers)._prepare_for_method_change() + + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_redirect: + response.drain_conn() + raise + return response + + response.drain_conn() + retries.sleep_for_retry(response) + log.debug("Redirecting %s -> %s", url, redirect_location) + return self.urlopen( + method, + redirect_location, + body, + headers, + retries=retries, + redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + # Check if we should retry the HTTP response. + has_retry_after = bool(response.headers.get("Retry-After")) + if retries.is_retry(method, response.status, has_retry_after): + try: + retries = retries.increment(method, url, response=response, _pool=self) + except MaxRetryError: + if retries.raise_on_status: + response.drain_conn() + raise + return response + + response.drain_conn() + retries.sleep(response) + log.debug("Retry: %s", url) + return self.urlopen( + method, + url, + body, + headers, + retries=retries, + redirect=redirect, + assert_same_host=assert_same_host, + timeout=timeout, + pool_timeout=pool_timeout, + release_conn=release_conn, + chunked=chunked, + body_pos=body_pos, + preload_content=preload_content, + decode_content=decode_content, + **response_kw, + ) + + return response + + +class HTTPSConnectionPool(HTTPConnectionPool): + """ + Same as :class:`.HTTPConnectionPool`, but HTTPS. + Always enables SSL certificate validation. + """ + + scheme = "https" + ConnectionCls: type[HTTPSConnection] = HTTPSConnection + + def __init__( + self, + host: str, + port: int | None = None, + timeout: int | None = None, + maxsize: int = 1, + block: bool = False, + headers: typing.Mapping[str, str] | None = None, + retries: Retry | bool | int | None = None, + _proxy: Url | None = None, + _proxy_headers: typing.Mapping[str, str] | None = None, + key_file: str | None = None, + cert_file: str | None = None, + cert_reqs: int | str | None = ssl.CERT_REQUIRED, # Always validate certs + key_password: str | None = None, + ca_certs: str | None = None, + ssl_version: int | str | None = None, + ssl_minimum_version: ssl.TLSVersion | None = None, + ssl_maximum_version: ssl.TLSVersion | None = None, + assert_hostname: bool | str | None = None, + assert_fingerprint: str | None = None, + ca_cert_dir: str | None = None, + **conn_kw: typing.Any, + ) -> None: + super().__init__( + host, + port, + timeout, + maxsize, + block, + headers, + retries, + _proxy, + _proxy_headers, + **conn_kw, + ) + + # Initialize SSL-related arguments + self.key_file = key_file + self.cert_file = cert_file + self.cert_reqs = cert_reqs + self.key_password = key_password + self.ca_certs = ca_certs + self.ca_cert_dir = ca_cert_dir + self.ssl_version = ssl_version + self.ssl_minimum_version = ssl_minimum_version + self.ssl_maximum_version = ssl_maximum_version + self.assert_hostname = assert_hostname + self.assert_fingerprint = assert_fingerprint + + def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override] + """Establishes a tunnel connection through HTTP CONNECT.""" + if self.proxy and self.proxy.scheme == "https": + tunnel_scheme = "https" + else: + tunnel_scheme = "http" + + conn.set_tunnel( + scheme=tunnel_scheme, + host=self._tunnel_host, + port=self.port, + 
headers=self.proxy_headers, + ) + conn.connect() + + def _new_conn(self) -> HTTPSConnection: + """ + Return a fresh :class:`urllib3.connection.HTTPSConnection`. + Always enforces certificate validation. + """ + self.num_connections += 1 + log.debug( + "Starting new HTTPS connection (%d): %s:%s", + self.num_connections, + self.host, + self.port or "443", + ) + + if not self.ConnectionCls or self.ConnectionCls is DummyConnection: # type: ignore[comparison-overlap] + raise ImportError( + "Can't connect to HTTPS URL because the SSL module is not available." + ) + + actual_host: str = self.host + actual_port = self.port + if self.proxy is not None and self.proxy.host is not None: + actual_host = self.proxy.host + actual_port = self.proxy.port + + # Create the HTTPS connection, enforcing certificate validation + return self.ConnectionCls( + host=actual_host, + port=actual_port, + timeout=self.timeout.connect_timeout, + cert_file=self.cert_file, + key_file=self.key_file, + key_password=self.key_password, + cert_reqs=ssl.CERT_REQUIRED, # Ensure always verifying certificates + ca_certs=self.ca_certs, + ca_cert_dir=self.ca_cert_dir, + assert_hostname=self.assert_hostname, + assert_fingerprint=self.assert_fingerprint, + ssl_version=self.ssl_version, + ssl_minimum_version=self.ssl_minimum_version, + ssl_maximum_version=self.ssl_maximum_version, + **self.conn_kw, + ) + + def _validate_conn(self, conn: HTTPSConnection) -> None: + """ + Called right before a request is made, after the socket is created. + Always enforces SSL verification. + """ + super()._validate_conn(conn) + + # Force connection early to allow us to validate the connection. + if conn.is_closed: + conn.connect() + + # Always ensure certificate verification is done and show warning if not verified + if not conn.is_verified and not conn.proxy_is_verified: + warnings.warn( + ( + f"Unverified HTTPS request is being made to host '{conn.host}'. " + "Adding certificate verification is strongly advised. See: " + "https://urllib3.readthedocs.io/en/latest/advanced-usage.html" + "#tls-warnings" + ), + InsecureRequestWarning, + ) + +def connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool: + """ + Given a url, return an :class:`.ConnectionPool` instance of its host. + + This is a shortcut for not having to parse out the scheme, host, and port + of the url before creating an :class:`.ConnectionPool` instance. + + :param url: + Absolute URL string that must include the scheme. Port is optional. + + :param \\**kw: + Passes additional parameters to the constructor of the appropriate + :class:`.ConnectionPool`. Useful for specifying things like + timeout, maxsize, headers, etc. + + Example:: + + >>> conn = connection_from_url('http://google.com/') + >>> r = conn.request('GET', '/') + """ + scheme, _, host, port, *_ = parse_url(url) + scheme = scheme or "http" + port = port or port_by_scheme.get(scheme, 80) + if scheme == "https": + return HTTPSConnectionPool(host, port=port, **kw) # type: ignore[arg-type] + else: + return HTTPConnectionPool(host, port=port, **kw) # type: ignore[arg-type] + + +@typing.overload +def _normalize_host(host: None, scheme: str | None) -> None: + ... + + +@typing.overload +def _normalize_host(host: str, scheme: str | None) -> str: + ... + + +def _normalize_host(host: str | None, scheme: str | None) -> str | None: + """ + Normalize hosts for comparisons and use with sockets. 
+ """ + + host = normalize_host(host, scheme) + + # httplib doesn't like it when we include brackets in IPv6 addresses + # Specifically, if we include brackets but also pass the port then + # httplib crazily doubles up the square brackets on the Host header. + # Instead, we need to make sure we never pass ``None`` as the port. + # However, for backward compatibility reasons we can't actually + # *assert* that. See http://bugs.python.org/issue28539 + if host and host.startswith("[") and host.endswith("]"): + host = host[1:-1] + return host + + +def _url_from_pool( + pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None +) -> str: + """Returns the URL from a given connection pool. This is mainly used for testing and logging.""" + return Url(scheme=pool.scheme, host=pool.host, port=pool.port, path=path).url + + +def _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None: + """Drains a queue of connections and closes each one.""" + try: + while True: + conn = pool.get(block=False) + if conn: + conn.close() + except queue.Empty: + pass # Done. \ No newline at end of file diff --git a/kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh b/kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh new file mode 100644 index 000000000..34f06a55b --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/app/resources/subcleaner.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +printf "Cleaning subtitles for '%s' ...\n" "$1" +python3 /add-ons/subcleaner/subcleaner.py "$1" -s + +case $1 in + *movies*) section="1";; + *shows*) section="2";; +esac + +if [[ -n "$section" ]]; then + printf "Refreshing Plex section '%s' for '%s' ...\n" "$section" "$(dirname "$1")" + /usr/bin/curl -I -X GET -G \ + --data-urlencode "path=$(dirname "$1")" \ + --data-urlencode "X-Plex-Token=${PLEX_TOKEN}" \ + --no-progress-meter \ + "http://plex.media.svc.cluster.local:32400/library/sections/${section}/refresh" +fi diff --git a/kubernetes/main/apps/media/bazarr/ks.yaml b/kubernetes/main/apps/media/bazarr/ks.yaml new file mode 100644 index 000000000..3b71c9d8f --- /dev/null +++ b/kubernetes/main/apps/media/bazarr/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app bazarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/bazarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml b/kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml new file mode 100644 index 000000000..85c62cee1 --- /dev/null +++ b/kubernetes/main/apps/media/flaresolverr/app/helmrelease.yaml @@ -0,0 +1,83 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: flaresolverr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 6 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 
3 + values: + controllers: + flaresolverr: + containers: + app: + image: + repository: ghcr.io/flaresolverr/flaresolverr + tag: v3.3.21@sha256:f104ee51e5124d83cf3be9b37480649355d223f7d8f9e453d0d5ef06c6e3b31b + env: + TZ: Europe/Sofia + PORT: &port 80 + HOST: "0.0.0.0" + TEST_URL: https://www.google.com + BROWSER_TIMEOUT: 40000 + HEADLESS: true + LOG_LEVEL: info + LOG_HTML: false + CAPTCHA_SOLVER: none + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 1024Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: flaresolverr + ports: + http: + port: *port + persistence: + tmp: + type: emptyDir diff --git a/kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml b/kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/media/flaresolverr/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/media/flaresolverr/ks.yaml b/kubernetes/main/apps/media/flaresolverr/ks.yaml new file mode 100644 index 000000000..57d915ffe --- /dev/null +++ b/kubernetes/main/apps/media/flaresolverr/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app flaresolverr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/media/flaresolverr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml b/kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml new file mode 100644 index 000000000..e9231116d --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/helmrelease.yaml @@ -0,0 +1,113 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: jellyseerr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: volsync + namespace: volsync-system + values: + controllers: + jellyseerr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/fallenbagel/jellyseerr + tag: develop@sha256:98a3dd60edc97cfe048a47a4d497b0a57c85d83f3bcfb354a63955df2481454a + env: + TZ: Europe/Sofia + 
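+ # PORT is anchored as &port just below and re-used by the liveness/readiness probes and the Service definition.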
LOG_LEVEL: "info" + PORT: &port 80 + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /api/v1/status + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + memory: 512Mi + limits: + memory: 2Gi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: jellyseerr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/modsecurity-snippet: | + SecAction "id:900200, phase:1, pass, t:none, nolog, setvar:tx.allowed_methods=GET HEAD POST OPTIONS PUT DELETE PATCH" + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "access-control-allow-origin https://requests.${PUBLIC_DOMAIN}"; + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + className: external + hosts: + - host: requests.${PUBLIC_DOMAIN} + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 20Gi + accessMode: ReadWriteOnce + globalMounts: + - path: /app/config + logs: + type: emptyDir + globalMounts: + - path: /app/config/logs + tmp: + type: emptyDir diff --git a/kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml b/kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml new file mode 100644 index 000000000..bba2bed56 --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./volsync.yaml + - ../../../../templates/gatus/external diff --git a/kubernetes/main/apps/media/jellyseerr/app/volsync.yaml b/kubernetes/main/apps/media/jellyseerr/app/volsync.yaml new file mode 100644 index 000000000..31e42c4a6 --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/app/volsync.yaml @@ -0,0 +1,47 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret jellyseerr-volsync-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + creationPolicy: Owner + template: + engineVersion: v2 + data: + RESTIC_REPOSITORY: '{{ .RESTIC_REPOSITORY }}/jellyseerr/volsync' + RESTIC_PASSWORD: '{{ .ENCRYPTION_KEY }}' + AWS_ACCESS_KEY_ID: '{{ .CF_ACCESS_KEY_ID }}' + AWS_SECRET_ACCESS_KEY: '{{ .CF_SECRET_ACCESS_KEY }}' + dataFrom: + - extract: + key: secrets/volsync + - extract: + key: secrets/cloudflare +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/volsync.backube/replicationsource_v1alpha1.json +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: jellyseerr +spec: + sourcePVC: jellyseerr-config + trigger: + schedule: "0 7 * * *" + restic: + copyMethod: Snapshot + pruneIntervalDays: 7 + repository: jellyseerr-volsync-secret + cacheCapacity: 2Gi + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + 
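+ # Retention below keeps the last 7 daily snapshots plus everything taken within the last 3 days; restic prune (pruneIntervalDays above) drops unreferenced data every 7 days.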
retain: + daily: 7 + within: 3d diff --git a/kubernetes/main/apps/media/jellyseerr/ks.yaml b/kubernetes/main/apps/media/jellyseerr/ks.yaml new file mode 100644 index 000000000..cad339495 --- /dev/null +++ b/kubernetes/main/apps/media/jellyseerr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app jellyseerr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/media/jellyseerr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_SUBDOMAIN: requests + GATUS_PATH: /api/v1/status diff --git a/kubernetes/main/apps/media/kustomization.yaml b/kubernetes/main/apps/media/kustomization.yaml new file mode 100644 index 000000000..e413c3eb6 --- /dev/null +++ b/kubernetes/main/apps/media/kustomization.yaml @@ -0,0 +1,21 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./jellyseerr/ks.yaml + - ./radarr/ks.yaml + - ./sonarr/ks.yaml + - ./unpackerr/ks.yaml + - ./autobrr/ks.yaml + - ./bazarr/ks.yaml + - ./sabnzbd/ks.yaml + - ./qbittorrent/ks.yaml + - ./prowlarr/ks.yaml + - ./flaresolverr/ks.yaml + - ./notifiarr/ks.yaml + - ./omegabrr/ks.yaml + - ./plex/ks.yaml diff --git a/kubernetes/main/apps/media/namespace.yaml b/kubernetes/main/apps/media/namespace.yaml new file mode 100644 index 000000000..a9e74c9e5 --- /dev/null +++ b/kubernetes/main/apps/media/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: media + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: media +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: media +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml b/kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml new file mode 100644 index 000000000..c04bbc5c3 --- /dev/null +++ b/kubernetes/main/apps/media/notifiarr/app/externalsecret.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret notifiarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: 
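+ # Each DN_* key rendered here reaches the container via envFrom/secretRef in the HelmRelease below, so a new setting only needs to be added to this template and to Vault.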
+ engineVersion: v2 + data: + DN_API_KEY: "{{ .DN_API_KEY }}" + DN_UI_PASSWORD: "{{ .DN_UI_PASSWORD }}" + DN_UPSTREAMS_0: "{{ .DN_UPSTREAMS_0 }}" + TMPDIR: "{{ .TMPDIR }}" + DN_BIND_ADDR: "{{ .DN_BIND_ADDR }}" + DN_MODE: "{{ .DN_MODE }}" + DN_PLEX_TOKEN: "{{ .DN_PLEX_TOKEN }}" + DN_PLEX_URL: "{{ .DN_PLEX_URL }}" + DN_RADARR_0_API_KEY: "{{ .DN_RADARR_0_API_KEY }}" + DN_RADARR_0_NAME: "{{ .DN_RADARR_0_NAME }}" + DN_RADARR_0_URL: "{{ .DN_RADARR_0_URL }}" + DN_SONARR_0_API_KEY: "{{ .DN_SONARR_0_API_KEY }}" + DN_SONARR_0_NAME: "{{ .DN_SONARR_0_NAME }}" + DN_SONARR_0_URL: "{{ .DN_SONARR_0_URL }}" + + dataFrom: + - extract: + key: secrets/notifiarr diff --git a/kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml b/kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml new file mode 100644 index 000000000..f4760efaa --- /dev/null +++ b/kubernetes/main/apps/media/notifiarr/app/helmrelease.yaml @@ -0,0 +1,103 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: notifiarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + notifiarr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: golift/notifiarr + tag: 0.8.3@sha256:7922f7d0e0336ca0e91182820c5d4b2ddc2d86083fa847c5a1088b41d5b20903 + envFrom: + - secretRef: + name: notifiarr-secret + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: / + port: &port 80 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: true + spec: + failureThreshold: 30 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 1Gi + defaultPodOptions: + hostname: def39d9a-546b-4b72-9466-a858e8aba5ff + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: notifiarr + ports: + http: + port: *port + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 2Gi + accessMode: ReadWriteOnce + + tmpdir: + type: emptyDir + medium: Memory diff --git a/kubernetes/main/apps/media/notifiarr/ks.yaml b/kubernetes/main/apps/media/notifiarr/ks.yaml new file mode 100644 index 000000000..e78eaf796 --- /dev/null +++ b/kubernetes/main/apps/media/notifiarr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app notifiarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: 
external-secrets-stores + path: ./kubernetes/main/apps/media/notifiarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml b/kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml new file mode 100644 index 000000000..349b9cdda --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/app/externalsecret.yaml @@ -0,0 +1,40 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret omegabrr-secret +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + config.yaml: | + server: + host: 0.0.0.0 + port: 80 + apiToken: {{ .OMEGABRR_API_KEY }} + clients: + autobrr: + host: https://autobrr.${PUBLIC_DOMAIN} + apikey: {{ .AUTOBRR_API_KEY }} + arr: + - name: radarr + type: radarr + host: https://radarr.${PUBLIC_DOMAIN} + apikey: {{ .RADARR_API_KEY }} + filters: [5] + - name: sonarr + type: sonarr + host: https://sonarr.${PUBLIC_DOMAIN} + apikey: {{ .SONARR_API_KEY }} + filters: [6] + excludeAlternateTitles: true + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml b/kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml new file mode 100644 index 000000000..6ea388356 --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/app/helmrelease.yaml @@ -0,0 +1,67 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: omegabrr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + omegabrr: + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: ghcr.io/autobrr/omegabrr + tag: v1.15.0@sha256:4f6099a76ff9d248e9f032e29c04a92b483f21456e46f3b01eb20399f4732ad0 + env: + TZ: Europe/Sofia + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: omegabrr + ports: + http: + port: 80 + persistence: + config-file: + type: secret + name: omegabrr-secret + globalMounts: + - path: /config/config.yaml + subPath: config.yaml + readOnly: true diff --git a/kubernetes/main/apps/media/omegabrr/app/kustomization.yaml b/kubernetes/main/apps/media/omegabrr/app/kustomization.yaml new file mode 100644 index 000000000..85e530b33 --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ./externalsecret.yaml + - 
./helmrelease.yaml diff --git a/kubernetes/main/apps/media/omegabrr/ks.yaml b/kubernetes/main/apps/media/omegabrr/ks.yaml new file mode 100644 index 000000000..d4006d805 --- /dev/null +++ b/kubernetes/main/apps/media/omegabrr/ks.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app omegabrr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/omegabrr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/media/plex/app/helmrelease.yaml b/kubernetes/main/apps/media/plex/app/helmrelease.yaml new file mode 100644 index 000000000..943db5a73 --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/helmrelease.yaml @@ -0,0 +1,150 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: plex +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: topolvm + namespace: kube-system + + values: + controllers: + plex: + annotations: + reloader.stakater.com/auto: "true" + + containers: + app: + image: + repository: ghcr.io/onedr0p/plex + tag: 1.41.2.9200-c6bbc1b53@sha256:47c6f3d85f4e739210860934a0bb24126170fa2f6a602fb909467f17a035c311 + env: + TZ: Europe/Sofia + PLEX_ADVERTISE_URL: https://plex.${PUBLIC_DOMAIN}:443,http://192.168.91.98:32400 + PLEX_UID: 568 + PLEX_GID: 568 + NVIDIA_VISIBLE_DEVICES: all + NVIDIA_DRIVER_CAPABILITIES: all + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /identity + port: 32400 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + startup: + enabled: true + spec: + failureThreshold: 30 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 3Gi + nvidia.com/gpu: 1 + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: plex + type: LoadBalancer + allocateLoadBalancerNodePorts: false + annotations: + lbipam.cilium.io/ips: 192.168.91.98 + ports: + http: + port: 32400 + + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/enable-modsecurity: "true" + nginx.ingress.kubernetes.io/modsecurity-transaction-id: "$request_id" + nginx.ingress.kubernetes.io/modsecurity-snippet: | + Include /etc/nginx/modsecurity/modsecurity.conf + Include /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf + nginx.ingress.kubernetes.io/configuration-snippet: | + add_header referrer-policy 
"same-origin" always; + add_header x-frame-options sameorigin; + add_header x-content-type-options "nosniff" always; + more_set_headers "access-control-allow-origin https://plex.${PUBLIC_DOMAIN}"; + className: external + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + pathType: ImplementationSpecific + service: + identifier: app + port: http + + persistence: + config: + type: persistentVolumeClaim + size: 100Gi + accessMode: ReadWriteOnce + globalMounts: + - path: /config/Library/Application Support/Plex Media Server + + logs: + type: emptyDir + globalMounts: + - path: /config/Library/Application Support/Plex Media Server/Logs + + tmp: + type: emptyDir + medium: Memory + + transcode: + type: persistentVolumeClaim + size: 200Gi + accessMode: ReadWriteOnce + + media: + type: nfs + server: nas.${PUBLIC_DOMAIN} + path: /mnt/exos20/data + globalMounts: + - path: /data/media + readOnly: true + subPath: media diff --git a/kubernetes/main/apps/media/plex/app/kustomization.yaml b/kubernetes/main/apps/media/plex/app/kustomization.yaml new file mode 100644 index 000000000..15350ddfb --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./resources/gatus-ep.yaml +configMapGenerator: + - name: plex-loki-rules + files: + - plex.yaml=./resources/lokirule.yaml + options: + labels: + loki_rule: "true" +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml new file mode 100644 index 000000000..e4c260ac4 --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/resources/gatus-ep.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "plex-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "plex" + group: external + url: "https://plex.${PUBLIC_DOMAIN}/web" + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/plex/app/resources/lokirule.yaml b/kubernetes/main/apps/media/plex/app/resources/lokirule.yaml new file mode 100644 index 000000000..ed0c09a34 --- /dev/null +++ b/kubernetes/main/apps/media/plex/app/resources/lokirule.yaml @@ -0,0 +1,14 @@ +--- +groups: + - name: plex + rules: + - alert: PlexDatabaseBusy + expr: | + sum by (app) (count_over_time({app="plex"} |~ "(?i)retry busy DB"[2m])) > 0 + for: 2m + labels: + severity: critical + category: logs + annotations: + app: "{{ $labels.app }}" + summary: "{{ $labels.app }} is experiencing database issues" diff --git a/kubernetes/main/apps/media/plex/ks.yaml b/kubernetes/main/apps/media/plex/ks.yaml new file mode 100644 index 000000000..fdb8bf912 --- /dev/null +++ b/kubernetes/main/apps/media/plex/ks.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app plex + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/plex/app + prune: true + sourceRef: + kind: GitRepository + name: 
home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /web/index.html diff --git a/kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml b/kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml new file mode 100644 index 000000000..6affebee7 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/externalsecret.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret prowlarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + PROWLARR__AUTH__APIKEY: "{{ .PROWLARR_API_KEY }}" + PROWLARR__POSTGRES__HOST: &dbHost postgres17-rw.database.svc.cluster.local + PROWLARR__POSTGRES__PORT: "5432" + PROWLARR__POSTGRES__USER: &dbUser "{{ .PROWLARR_POSTGRES_USER }}" + PROWLARR__POSTGRES__PASSWORD: &dbPass "{{ .PROWLARR_POSTGRES_PASS }}" + PROWLARR__POSTGRES__MAINDB: &dbName prowlarr + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/api-keys + - extract: + key: secrets/cloudnative-pg diff --git a/kubernetes/main/apps/media/prowlarr/app/gatus-config.yaml b/kubernetes/main/apps/media/prowlarr/app/gatus-config.yaml new file mode 100644 index 000000000..1696e2fe6 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/gatus-config.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${APP}-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "${APP}" + group: guarded + url: "https://${APP}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}" + interval: 10m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].status == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml b/kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml new file mode 100644 index 000000000..fe9eb5dc6 --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/helmrelease.yaml @@ -0,0 +1,134 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app prowlarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + prowlarr: + annotations: + reloader.stakater.com/auto: "true" + + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f + envFrom: &envFrom + - secretRef: + name: prowlarr-secret + + containers: + app: + image: + repository: ghcr.io/onedr0p/prowlarr-develop + tag: 1.26.0.4833@sha256:face4aa669a4eb68b041dcf73ed4848cfe8f673826ef3032398a5e267eb1eac0 + env: + PROWLARR__APP__INSTANCENAME: Prowlarr + PROWLARR__APP__THEME: dark + PROWLARR__AUTH__METHOD: External + 
PROWLARR__AUTH__REQUIRED: DisabledForLocalAddresses + PROWLARR__LOG__DBENABLED: "False" + PROWLARR__LOG__LEVEL: info + PROWLARR__SERVER__PORT: &port 80 + PROWLARR__UPDATE__BRANCH: develop + TZ: Europe/Sofia + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ping + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 1Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: *app + ports: + http: + port: *port + + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "(\/|\/[0-9]+\/)api(/|$|[?])") { + return 200; + } + if ($request_uri ~* "^/ping") { + return 200; + } + if ($request_uri ~* "(\/|\/[0-9]+\/)download(/|$|[?])") { + return 200; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + type: emptyDir + medium: Memory + + tmp: + type: emptyDir + medium: Memory diff --git a/kubernetes/main/apps/media/prowlarr/app/kustomization.yaml b/kubernetes/main/apps/media/prowlarr/app/kustomization.yaml new file mode 100644 index 000000000..8ce00c48d --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./gatus-config.yaml + diff --git a/kubernetes/main/apps/media/prowlarr/ks.yaml b/kubernetes/main/apps/media/prowlarr/ks.yaml new file mode 100644 index 000000000..7be196f4b --- /dev/null +++ b/kubernetes/main/apps/media/prowlarr/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app prowlarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/prowlarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /ping \ No newline at end of file diff --git a/kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml b/kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml new file mode 100644 index 000000000..b1441bb3b --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/externalsecret.yaml @@ -0,0 +1,89 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret qbittorrent-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + CROSS_SEED_API_KEY: "{{ .CROSS_SEED_API_KEY }}" + PUSHOVER_TOKEN: "{{ .QBITTORRENT_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + dataFrom: + - extract: + key: secrets/api-keys + - extract: + key: secrets/pushover +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret gluetun-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + WIREGUARD_PRIVATE_KEY: "{{ .WIREGUARD_PRIVATE_KEY }}" + WIREGUARD_PUBLIC_KEY: "{{ .WIREGUARD_PUBLIC_KEY }}" + dataFrom: + - extract: + key: secrets/gluetun +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret cross-seed-secret +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + config.js: | + module.exports = { + action: "inject", + apiKey: "{{.CROSS_SEED_API_KEY}}", + delay: 30, + duplicateCategories: false, + flatLinking: false, + includeEpisodes: true, + includeNonVideos: true, + includeSingleEpisodes: true, + linkCategory: "cross-seed", + linkDir: "/data/downloads/torrents/complete/cross-seed", + linkType: "hardlink", + matchMode: "safe", + outputDir: "/config", + port: 8080, + qbittorrentUrl: "http://localhost", + radarr: ["https://radarr.${PUBLIC_DOMAIN}/?apikey={{ .RADARR_API_KEY }}"], + skipRecheck: true, + sonarr: ["https://sonarr.${PUBLIC_DOMAIN}/?apikey={{ .SONARR_API_KEY }}"], + torrentDir: "/qbittorrent/qBittorrent/BT_backup", + torznab: [ + 3, // IPT + 1, // SA + 2, // TL + ].map(i => `https://prowlarr.${PUBLIC_DOMAIN}/$${i}/api?apikey={{ .PROWLARR_API_KEY }}`), + }; + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml b/kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml new file mode 100644 index 000000000..553bb6982 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/helmrelease.yaml @@ -0,0 +1,385 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: qbittorrent +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: volsync + namespace: volsync-system + + values: + controllers: + torrenting: + annotations: + reloader.stakater.com/auto: "true" + + initContainers: + gluetun: + dependsOn: dnsdist + image: + repository: ghcr.io/qdm12/gluetun + tag: latest@sha256:637a3a9daf56975942a1479ca44f574f96812a90e2150334704e2bbd7aa9321d + env: + TZ: 
&TZ Europe/Sofia + DOT: "off" + DNS_ADDRESS: "127.0.0.2" + VPN_TYPE: wireguard + VPN_SERVICE_PROVIDER: protonvpn + VPN_PORT_FORWARDING: "on" + VPN_PORT_FORWARDING_LISTENING_PORT: 50413 + WIREGUARD_ADDRESSES: 10.2.0.2/32 + FIREWALL_INPUT_PORTS: 80 + FIREWALL_OUTBOUND_SUBNETS: 172.16.0.0/16,172.17.0.0/16 + SERVER_HOSTNAMES: node-ch-15.protonvpn.net + VPN_INTERFACE: tun0 + PUID: 568 + PGID: 568 + VERSION_INFORMATION: "off" + envFrom: + - secretRef: + name: gluetun-secret + restartPolicy: Always + probes: + liveness: + enabled: true + custom: true + spec: + exec: + command: + - /gluetun-entrypoint + - healthcheck + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + startup: + enabled: true + custom: true + spec: + exec: + command: + - /gluetun-entrypoint + - healthcheck + initialDelaySeconds: 20 + periodSeconds: 10 + failureThreshold: 3 + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "(ip rule del table 51820; ip -6 rule del table 51820) || true"] + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 568 + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"], add: ["NET_ADMIN","CHOWN"] } + resources: + limits: + squat.ai/tun: 1 + + dnsdist: + image: + repository: docker.io/powerdns/dnsdist-19 + tag: 1.9.7@sha256:6ed810f4d5da4585ccfc2667a794a78319ff2960a43266fc6a160c063d04e1ef + restartPolicy: Always + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + privileged: false + runAsUser: 10001 + runAsGroup: 10001 + capabilities: { drop: ["ALL"] } + + containers: + qbittorrent: + image: + repository: ghcr.io/onedr0p/qbittorrent + tag: 5.0.1@sha256:7fc5af4f7f9c8e4998aaa18b5e2e634757c55f70c23b9bd46b40e09a4c7acda5 + env: + UMASK: "022" + TZ: *TZ + QBT_WEBUI_PORT: &port 80 + QBT_TORRENTING_PORT: 50413 + CROSS_SEED_ENABLED: true + CROSS_SEED_HOST: localhost + CROSS_SEED_PORT: 8080 + CROSS_SEED_SLEEP_INTERVAL: 15 + PUSHOVER_ENABLED: true + envFrom: + - secretRef: + name: qbittorrent-secret + probes: + readiness: + enabled: true + custom: true + spec: + exec: + command: + - /scripts/healthcheck.sh + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + liveness: + enabled: true + custom: true + spec: + exec: + command: + - /scripts/healthcheck.sh + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + limits: + memory: 6Gi + + qbitmanage: + dependsOn: qbittorrent + image: + repository: ghcr.io/stuffanthings/qbit_manage + tag: v4.1.13@sha256:fa623102eeac2c9cda115aa23f7a5bb85af2ab2cffec766b5173c85a073926b9 + env: + TZ: *TZ + probes: + liveness: + enabled: true + readiness: + enabled: false + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 1024Mi + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "cp /secret/config.yml /config/config.yml"] + + cross-seed: + dependsOn: qbittorrent + image: + repository: ghcr.io/cross-seed/cross-seed + tag: 6.0.0-44@sha256:881ce834570f3b369860e47a0801b89ab10469b3beccf5279f90e041d3725058 + env: + TZ: *TZ + args: ["daemon"] + probes: + liveness: + enabled: false + readiness: + enabled: false + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: 
["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 512Mi + + vuetorrent: + dependsOn: qbittorrent + image: + repository: registry.k8s.io/git-sync/git-sync + tag: v4.3.0 + args: + - --repo=https://github.com/WDaan/VueTorrent + - --ref=latest-release + - --period=86400s + - --root=/addons + securityContext: + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + memory: 25Mi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + terminationGracePeriodSeconds: 300 + + service: + qbittorrent: + controller: torrenting + primary: true + ports: + http: + port: *port + + ingress: + qbittorrent: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "^/api(/|$)") { + return 202; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: qbittorrent + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 5Gi + accessMode: ReadWriteOnce + advancedMounts: + torrenting: + qbittorrent: + - path: /config + qbitmanage: + - path: /qBittorrent/BT_backup + subPath: qBittorrent/BT_backup + readOnly: true + cross-seed: + - path: /qbittorrent/qBittorrent/BT_backup + subPath: qBittorrent/BT_backup + readOnly: true + scripts: + type: configMap + name: qbittorrent-scripts + defaultMode: 0550 + advancedMounts: + torrenting: + qbittorrent: + - readOnly: true + + media: + type: nfs + server: nas.${PUBLIC_DOMAIN} + path: /mnt/exos20/data + advancedMounts: + torrenting: + qbitmanage: + - path: /data/downloads/torrents + subPath: downloads/torrents + qbittorrent: + - path: /data/downloads/torrents + subPath: downloads/torrents + cross-seed: + - path: /data/downloads/torrents/complete + subPath: downloads/torrents/complete + + config-qbitmanage: + type: emptyDir + medium: Memory + advancedMounts: + torrenting: + qbitmanage: + - path: /app/config + - path: /config + + config-file-qbitmanage: + type: configMap + name: qbitmanage-config + defaultMode: 0600 + advancedMounts: + torrenting: + qbitmanage: + - path: /secret/config.yml + subPath: config.yml + readOnly: true + + config-cross-seed: + type: emptyDir + medium: Memory + advancedMounts: + torrenting: + cross-seed: + - path: /config + + config-file-cross-seed: + type: secret + name: cross-seed-secret + advancedMounts: + torrenting: + cross-seed: + - path: /config/config.js + subPath: config.js + readOnly: true + + addons: + type: emptyDir + advancedMounts: + torrenting: + qbittorrent: + - path: /addons + readOnly: true + vuetorrent: + - path: /addons + + dnsdist: + type: configMap + name: qbittorrent-dnsdist + defaultMode: 0400 + advancedMounts: + torrenting: + dnsdist: + - path: /etc/dnsdist/dnsdist.conf + subPath: dnsdist.conf + readOnly: true + + gluetun: + type: persistentVolumeClaim + accessMode: ReadWriteOnce + size: 128Mi + advancedMounts: + torrenting: + gluetun: + - path: /tmp diff --git 
a/kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml b/kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml new file mode 100644 index 000000000..f72775106 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/kustomization.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./volsync.yaml + - ./resources/gatus-ep.yaml +configMapGenerator: + - name: qbittorrent-loki-rules + files: + - qbittorrent.yaml=./resources/lokirule.yaml + options: + labels: + loki_rule: "true" + - name: qbittorrent-scripts + files: + - post-process.sh=./resources/post-process.sh + - healthcheck.sh=./resources/healthcheck.sh + - name: qbitmanage-config + files: + - config.yml=./resources/qbitmanage-config.yaml + - name: qbittorrent-dnsdist + files: + - ./resources/dnsdist.conf +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/dnsdist.conf b/kubernetes/main/apps/media/qbittorrent/app/resources/dnsdist.conf new file mode 100644 index 000000000..4a6dcb3c4 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/dnsdist.conf @@ -0,0 +1,37 @@ +-- udp/tcp dns listening + setLocal("127.0.0.2:53", {}) + +-- disable security status polling via DNS +setSecurityPollSuffix("") + + -- K8S DNS + newServer({ + address = "172.17.0.10", + pool = "k8s", + healthCheckMode = "lazy", + lazyHealthCheckMode = 'TimeoutOnly', + }) + + -- CloudFlare DNS over TLS + newServer({ + address = "1.1.1.1:853", + tls = "openssl", + subjectName = "cloudflare-dns.com", + validateCertificates = true, + healthCheckMode = "lazy", + lazyHealthCheckMode = 'TimeoutOnly', + }) + newServer({ + address = "1.0.0.1:853", + tls = "openssl", + subjectName = "cloudflare-dns.com", + validateCertificates = true, + healthCheckMode = "lazy", + lazyHealthCheckMode = 'TimeoutOnly', + }) + +-- Routing rules +addAction('cluster.local', PoolAction('k8s')) -- Directs `cluster.local` queries to the Kubernetes pool + +-- Route ${PUBLIC_DOMAIN} queries to the Kubernetes pool as well +addAction('${PUBLIC_DOMAIN}', PoolAction('k8s')) diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml new file mode 100644 index 000000000..88ed66c14 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/gatus-ep.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "qbittorrent-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "qbittorrent-version" + group: guarded + url: "https://qbittorrent.${PUBLIC_DOMAIN}/api/v2/app/version" + interval: 5m + ui: + hide-hostname: true + hide-url: true + conditions: + - "[STATUS] == 403" + alerts: + - type: pushover + headers: + Accept: application/json diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh b/kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh new file mode 100644 index 000000000..ba84b8f12 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/healthcheck.sh @@ -0,0 +1,10 @@ +#!/bin/bash +http_code=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:80/api/v2/app/version) +if [[ $http_code != 200 ]]; then + echo "App status: not up yet, did you enable \"Bypass authentication for clients on localhost\" in the Web UI options?" 
+ exit 1 +else + echo "App status: up and running" +fi + +exit 0 diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml new file mode 100644 index 000000000..e2f6b0c76 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/lokirule.yaml @@ -0,0 +1,14 @@ +--- +groups: + - name: qbittorrent + rules: + - alert: QbittorrentFastResumeRejected + expr: | + sum by (app) (count_over_time({app="qbittorrent"} |~ "(?i)fast resume rejected"[1h])) > 0 + for: 2m + labels: + severity: critical + category: logs + annotations: + app: "{{ $labels.app }}" + summary: "{{ $labels.app }} has a torrent with fast resume rejected" diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh b/kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh new file mode 100644 index 000000000..a12a4c286 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/post-process.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 + +set -euo pipefail + +# User-defined variables +CROSS_SEED_ENABLED="$${CROSS_SEED_ENABLED:-false}" +CROSS_SEED_HOST="$${CROSS_SEED_HOST:-required}" +CROSS_SEED_PORT="$${CROSS_SEED_PORT:-required}" +CROSS_SEED_API_KEY="$${CROSS_SEED_API_KEY:-required}" +CROSS_SEED_SLEEP_INTERVAL="$${CROSS_SEED_SLEEP_INTERVAL:-30}" +PUSHOVER_ENABLED="$${PUSHOVER_ENABLED:-false}" +PUSHOVER_USER_KEY="$${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="$${PUSHOVER_TOKEN:-required}" + +# Function to set release variables from SABnzbd +set_sab_vars() { + RELEASE_NAME="$${SAB_FILENAME:-}" + RELEASE_DIR="$${SAB_COMPLETE_DIR:-}" + RELEASE_CAT="$${SAB_CAT:-}" + RELEASE_SIZE="$${SAB_BYTES:-}" + RELEASE_STATUS="$${SAB_PP_STATUS:-}" + RELEASE_INDEXER="$${SAB_URL:-}" + RELEASE_TYPE="NZB" +} + +# Function to set release variables from qBittorrent +set_qb_vars() { + RELEASE_NAME="$1" # %N + RELEASE_DIR="$2" # %F + RELEASE_CAT="$3" # %L + RELEASE_SIZE="$4" # %Z + RELEASE_INDEXER="$5" # %T + RELEASE_STATUS=0 # Always 0 for qBittorrent + RELEASE_TYPE="Torrent" +} + +# Function to send pushover notification +send_pushover_notification() { + local pushover_message status_code json_data + printf -v pushover_message \ + "%s\nCategory: %s\nIndexer: %s\nSize: %s" \ + "$${RELEASE_NAME%.*}" \ + "$${RELEASE_CAT}" \ + "$(trurl --url "$${RELEASE_INDEXER}" --get '{idn:host}')" \ + "$(numfmt --to iec --format "%8.2f" "$${RELEASE_SIZE}")" + + json_data=$(jo \ + token="$${PUSHOVER_TOKEN}" \ + user="$${PUSHOVER_USER_KEY}" \ + title="$${RELEASE_TYPE} Downloaded" \ + message="$${pushover_message}" \ + priority="-2" \ + html="1" + ) + + status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "$${json_data}" \ + "https://api.pushover.net/1/messages.json" + ) + + printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "$${status_code}" \ + "$(echo "$${json_data}" | jq --compact-output)" >&2 +} + +# Function to search for cross-seed +search_cross_seed() { + local status_code + status_code=$(curl \ + --silent \ + --output /dev/null \ + --write-out "%{http_code}" \ + --request POST \ + --data-urlencode "path=$${RELEASE_DIR}" \ + --header "X-Api-Key: $${CROSS_SEED_API_KEY}" \ + "http://$${CROSS_SEED_HOST}:$${CROSS_SEED_PORT}/api/webhook" + ) + + printf "cross-seed search returned with HTTP status code %s and path %s\n" \ 
"$${status_code}" \ + "$${RELEASE_DIR}" >&2 + + sleep "$${CROSS_SEED_SLEEP_INTERVAL}" +} + +main() { + # Determine the source and set release variables accordingly + if env | grep -q "^SAB_"; then + set_sab_vars + else + set_qb_vars "$@" + fi + + # Check if post-processing was successful + if [[ "$${RELEASE_STATUS}" -ne 0 ]]; then + printf "post-processing failed with sabnzbd status code %s\n" \ + "$${RELEASE_STATUS}" >&2 + exit 1 + fi + + # Send pushover notification + if [[ "$${PUSHOVER_ENABLED}" == "true" ]]; then + send_pushover_notification + fi + + # Search for cross-seed + if [[ "$${CROSS_SEED_ENABLED}" == "true" ]]; then + search_cross_seed + fi +} + +main "$@" diff --git a/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml b/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml new file mode 100644 index 000000000..a2d58ce9e --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/resources/qbitmanage-config.yaml @@ -0,0 +1,304 @@ +commands: + dry_run: false + cross_seed: false + recheck: false + cat_update: false + tag_update: true + rem_unregistered: true + tag_tracker_error: true + rem_orphaned: true + tag_nohardlinks: true + share_limits: true + skip_qb_version_check: true + skip_cleanup: false + +qbt: + host: localhost:80 + user: "" + pass: "" + +settings: + force_auto_tmm: false # Will force qBittorrent to enable Automatic Torrent Management for each torrent. + tracker_error_tag: issue # Will set the tag of any torrents that do not have a working tracker. + nohardlinks_tag: noHL # Will set the tag of any torrents with no hardlinks. + share_limits_tag: ~share_limit # Will add this tag when applying share limits to provide an easy way to filter torrents by share limit group/priority for each torrent + share_limits_min_seeding_time_tag: MinSeedTimeNotReached # Tag to be added to torrents that have not yet reached the minimum seeding time + share_limits_min_num_seeds_tag: MinSeedsNotMet # Tag to be added to torrents that have not yet reached the minimum number of seeds + share_limits_last_active_tag: LastActiveLimitNotReached # Tag to be added to torrents that have not yet reached the last active limit + cross_seed_tag: cross-seed # Will set the tag of any torrents that are added by cross-seed command + cat_filter_completed: true # Filters for completed torrents only when running cat_update command + share_limits_filter_completed: true # Filters for completed torrents only when running share_limits command + tag_nohardlinks_filter_completed: true # Filters for completed torrents only when running tag_nohardlinks command + cat_update_all: true # Checks and udpates all torrent categories if set to True when running cat_update command, otherwise only update torrents that are uncategorized + + force_auto_tmm_ignore_tags: cross-seed + disable_qbt_default_share_limits: true +directory: + # Do not remove these + # Cross-seed var: # Output directory of cross-seed + # root_dir var: # Root downloads directory used to check for orphaned files, noHL, and RecycleBin. + # remote_dir var: # Path of docker host mapping of root_dir. + # remote_dir must be set if you're running qbit_manage locally and qBittorrent/cross_seed is in a docker + # remote_dir should not be set if qbit_manage is running in a container + # recycle_bin var: # Path of the RecycleBin folder. Default location is set to remote_dir/.RecycleBin + # torrents_dir var: # Path of the your qbittorrent torrents directory. 
Required for `save_torrents` attribute in recyclebin + # orphaned_dir var: # Path of the Orphaned Data folder. This is similar to RecycleBin, but only for orphaned data. + remote_dir: /data/downloads/torrents/complete + cross_seed: /your/path/here/ + root_dir: /data/downloads/torrents/complete + recycle_bin: /data/downloads/torrents/.RecycleBin + torrents_dir: /qBittorrent/BT_backup + orphaned_dir: /data/downloads/torrents/orphaned_data + +cat: + # Category & Path Parameters + # <category>: <save_path> # Path of your save directory. + +cat_change: + # This moves all the torrents from one category to another category. This executes on --cat-update + # WARNING: if the paths are different and Default Torrent Management Mode is set to automatic the files could be moved !!! + # <old_category>: <new_category> + +tracker: + # Mandatory + # Tag Parameters + # <tracker_url_keyword>: # This is the keyword in the tracker url. You can define multiple tracker urls by splitting with `|` delimiter + # Set tag name. Can be a list of tags or a single tag + # tag: + # Set the category based on tracker URL. This category option takes priority over the category defined by save directory + # cat: + # Set this to the notifiarr react name. This is used to add indexer reactions to the notifications sent by Notifiarr + # notifiarr: + animebytes.tv: + tag: AnimeBytes + notifiarr: animebytes + avistaz: + tag: + - Avistaz + - tag2 + - tag3 + notifiarr: avistaz + beyond-hd: + tag: [Beyond-HD, tag2, tag3] + cat: movies + notifiarr: beyondhd + blutopia: + tag: Blutopia + notifiarr: blutopia + cartoonchaos: + tag: CartoonChaos + digitalcore: + tag: DigitalCore + notifiarr: digitalcore + gazellegames: + tag: GGn + hdts: + tag: HDTorrents + landof.tv: + tag: BroadcasTheNet + notifiarr: broadcasthenet + myanonamouse: + tag: MaM + passthepopcorn: + tag: PassThePopcorn + notifiarr: passthepopcorn + privatehd: + tag: PrivateHD + notifiarr: + torrentdb: + tag: TorrentDB + notifiarr: torrentdb + torrentleech|tleechreload: + tag: TorrentLeech + notifiarr: torrentleech + tv-vault: + tag: TV-Vault + stackoverflow|empirehost|bgp: + tag: IPTorrents + notifiarr: iptorrents + speedapp: + tag: speedapp.io + notifiarr: speedapp.io + # The "other" key is a special keyword and if defined will tag any other trackers that don't match the above trackers into this tag + other: + tag: other + +nohardlinks: + # Tag Movies/Series that are not hard linked outside the root directory + # Mandatory to fill out directory parameter above to use this function (root_dir/remote_dir) + # This variable should be set to the category name of your completed movies/completed series in qbit. 
Any category you would like to tag can be used if there are no hardlinks found + movies: + ignore_root_dir: true + movies-imported: + exclude_tags: + - Beyond-HD + - AnimeBytes + - MaM + ignore_root_dir: true + tv: + ignore_root_dir: true + tv-imported: + exclude_tags: + - Beyond-HD + - AnimeBytes + - MaM + ignore_root_dir: true + cross-seed: + ignore_root_dir: false + +share_limits: + + noHL_cross-seed: + priority: 1 + include_all_tags: + - noHL + - cross-seed + categories: + - cross-seed + max_seeding_time: 10m + cleanup: true + custom_tag: sharelimits_noHL_cross-seed + noHL_TorrentLeech: + priority: 2 + include_all_tags: + - noHL + - TorrentLeech + categories: + - movies + - movies-imported + - tv + - tv-imported + max_seeding_time: 10d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_noHL_TorrentLeech + + noHL_speedapp.io: + priority: 3 + include_all_tags: + - noHL + - speedapp.io + categories: + - movies + - movies-imported + - tv + - tv-imported + max_seeding_time: 3d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_noHL_speedapp.io + + noHL_IPTorrents: + priority: 4 + include_all_tags: + - noHL + - IPTorrents + categories: + - movies + - movies-imported + - tv + - tv-imported + max_seeding_time: 16d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_noHL_IPTorrents + + TorrentLeech: + priority: 5 + include_all_tags: + - TorrentLeech + categories: + - movies + - movies-imported + - tv + - tv-imported + - cross-seed + max_seeding_time: 30d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_TorrentLeech + + IPTorrents: + priority: 6 + include_all_tags: + - IPTorrents + categories: + - movies + - movies-imported + - tv + - tv-imported + - cross-seed + max_seeding_time: 30d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_IPTorrents + + speedapp.io: + priority: 7 + include_all_tags: + - speedapp.io + categories: + - movies + - movies-imported + - tv + - tv-imported + - cross-seed + max_seeding_time: 30d + cleanup: true + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_speedapp.io + + general: + priority: 8 + include_any_tags: + - speedapp.io + - IPTorrents + - TorrentLeech + categories: + - general-completed + max_ratio: 5 + max_seeding_time: 30d + cleanup: false + resume_torrent_after_change: true + add_group_to_tag: true + custom_tag: sharelimits_general + +recyclebin: + # Recycle Bin method of deletion will move files into the recycle bin (Located in /root_dir/.RecycleBin) instead of directly deleting them in qbit + # By default the Recycle Bin will be emptied on every run of the qbit_manage script if empty_after_x_days is defined. + enabled: true + # empty_after_x_days var: + # Will automatically remove all files and folders in recycle bin after x days. (Checks every script run) + # If this variable is not defined, the RecycleBin will never be emptied. + # WARNING: Setting this variable to 0 will delete all files immediately upon script run! + empty_after_x_days: 7 + # save_torrents var: + # If this option is set to true, you MUST fill out the torrents_dir in the directory attribute. 
+ # This will save a copy of your .torrent and .fastresume file in the recycle bin before deleting it from qbittorrent + save_torrents: true + # split_by_category var: + # This will split the recycle bin folder by the save path defined in the `cat` attribute + # and add the base folder name of the recycle bin that was defined in the `recycle_bin` sub-attribute under directory. + split_by_category: true + +orphaned: + # Orphaned files are those in the root_dir download directory that are not referenced by any active torrents. + # Will automatically remove all files and folders in orphaned data after x days. (Checks every script run) + # If this variable is not defined, the orphaned data will never be emptied. + # WARNING: Setting this variable to 0 will delete all files immediately upon script run! + empty_after_x_days: 7 + # File patterns that will not be considered orphaned files. Handy for generated files that aren't part of the torrent but belong with the torrent's files + exclude_patterns: + - '**/.DS_Store' + - '**/Thumbs.db' + - '**/@eaDir' + - '**/general/*' + - '**/*.!qB' + - '**/*_unpackerred' + max_orphaned_files_to_delete: 50000 diff --git a/kubernetes/main/apps/media/qbittorrent/app/volsync.yaml b/kubernetes/main/apps/media/qbittorrent/app/volsync.yaml new file mode 100644 index 000000000..b50310653 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/app/volsync.yaml @@ -0,0 +1,47 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret qbittorrent-volsync-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + creationPolicy: Owner + template: + engineVersion: v2 + data: + RESTIC_REPOSITORY: '{{ .RESTIC_REPOSITORY }}/qbittorrent/volsync/config-volsync-config' + RESTIC_PASSWORD: '{{ .ENCRYPTION_KEY }}' + AWS_ACCESS_KEY_ID: '{{ .CF_ACCESS_KEY_ID }}' + AWS_SECRET_ACCESS_KEY: '{{ .CF_SECRET_ACCESS_KEY }}' + dataFrom: + - extract: + key: secrets/volsync + - extract: + key: secrets/cloudflare +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/volsync.backube/replicationsource_v1alpha1.json +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: qbittorrent +spec: + sourcePVC: qbittorrent-config + trigger: + schedule: "0 7 * * *" + restic: + copyMethod: Snapshot + pruneIntervalDays: 7 + repository: qbittorrent-volsync-secret + cacheCapacity: 2Gi + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + retain: + daily: 7 + within: 3d diff --git a/kubernetes/main/apps/media/qbittorrent/ks.yaml b/kubernetes/main/apps/media/qbittorrent/ks.yaml new file mode 100644 index 000000000..502f68fe9 --- /dev/null +++ b/kubernetes/main/apps/media/qbittorrent/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app qbittorrent + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/qbittorrent/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/media/radarr/app/externalsecret.yaml 
b/kubernetes/main/apps/media/radarr/app/externalsecret.yaml new file mode 100644 index 000000000..3b6feb2e8 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/externalsecret.yaml @@ -0,0 +1,35 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret radarr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + RADARR__AUTH__APIKEY: "{{ .RADARR_API_KEY }}" + RADARR__POSTGRES__HOST: &dbHost postgres17-rw.database.svc.cluster.local + RADARR__POSTGRES__PORT: "5432" + RADARR__POSTGRES__USER: &dbUser "{{ .RADARR_POSTGRES_USER }}" + RADARR__POSTGRES__PASSWORD: &dbPass "{{ .RADARR_POSTGRES_PASSWORD }}" + RADARR__POSTGRES__MAINDB: &dbName radarr + PUSHOVER_TOKEN: "{{ .RADARR_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + INIT_POSTGRES_DBNAME: *dbName + INIT_POSTGRES_HOST: *dbHost + INIT_POSTGRES_USER: *dbUser + INIT_POSTGRES_PASS: *dbPass + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/pushover + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/radarr/app/gatus-config.yaml b/kubernetes/main/apps/media/radarr/app/gatus-config.yaml new file mode 100644 index 000000000..1696e2fe6 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/gatus-config.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${APP}-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "${APP}" + group: guarded + url: "https://${APP}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}" + interval: 10m + ui: + hide-hostname: true + hide-url: true + client: + dns-resolver: tcp://172.17.0.10:53 + conditions: + - "[STATUS] == 200" + - "[BODY].status == OK" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/media/radarr/app/helmrelease.yaml b/kubernetes/main/apps/media/radarr/app/helmrelease.yaml new file mode 100644 index 000000000..387dd7f86 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/helmrelease.yaml @@ -0,0 +1,161 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app radarr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: topolvm + namespace: kube-system + + values: + controllers: + radarr: + annotations: + reloader.stakater.com/auto: "true" + + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f + envFrom: &envFrom + - secretRef: + name: radarr-secret + + containers: + app: + image: + repository: ghcr.io/onedr0p/radarr-develop + tag: 5.15.0.9412@sha256:3d922f5f80935c1aadbe49597cb4cd509e2890ca0c01d6778ac54825012fdde4 + env: + RADARR__APP__INSTANCENAME: Radarr + RADARR__APP__THEME: dark + RADARR__AUTH__METHOD: External + RADARR__AUTH__REQUIRED: DisabledForLocalAddresses + 
RADARR__LOG__DBENABLED: "False" + RADARR__LOG__LEVEL: info + RADARR__SERVER__PORT: &port 80 + RADARR__UPDATE__BRANCH: develop + TZ: Europe/Sofia + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ping + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + memory: 4Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: *app + ports: + http: + port: *port + + ingress: + app: + annotations: + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "^/api(/|$)") { + return 200; + } + if ($request_uri ~* "^/ping") { + return 200; + } + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 15Gi + accessMode: ReadWriteOnce + advancedMounts: + radarr: + app: + - path: /config + + scripts: + type: configMap + name: radarr-configmap + defaultMode: 0550 + advancedMounts: + radarr: + app: + - path: /scripts/pushover-notify.sh + subPath: pushover-notify.sh + readOnly: true + + tmp: + type: emptyDir + medium: Memory + + media: + type: nfs + server: nas.${PUBLIC_DOMAIN} + path: /mnt/exos20/data + advancedMounts: + radarr: + app: + - path: /data diff --git a/kubernetes/main/apps/media/radarr/app/kustomization.yaml b/kubernetes/main/apps/media/radarr/app/kustomization.yaml new file mode 100644 index 000000000..53ac843b4 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./gatus-config.yaml +configMapGenerator: + - name: radarr-configmap + files: + - pushover-notify.sh=./resources/pushover-notify.sh +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled diff --git a/kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh b/kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh new file mode 100644 index 000000000..54b669101 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/app/resources/pushover-notify.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +set -euo pipefail + +# User defined variables for pushover +PUSHOVER_USER_KEY="${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="${PUSHOVER_TOKEN:-required}" +PUSHOVER_PRIORITY="${PUSHOVER_PRIORITY:-"-2"}" + +if [[ "${radarr_eventtype:-}" == "Test" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Test Notification" + printf -v PUSHOVER_MESSAGE \ + "Howdy this is a test 
notification from %s" \ + "${radarr_instancename:-Radarr}" + printf -v PUSHOVER_URL \ + "%s" \ + "${radarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "Open %s" \ + "${radarr_instancename:-Radarr}" +fi + +if [[ "${radarr_eventtype:-}" == "Download" ]]; then + printf -v PUSHOVER_TITLE \ + "Movie %s" \ + "$( [[ "${radarr_isupgrade}" == "True" ]] && echo "Upgraded" || echo "Downloaded" )" + printf -v PUSHOVER_MESSAGE \ + "%s (%s)\n%s\n\nClient: %s\nQuality: %s\nSize: %s" \ + "${radarr_movie_title}" \ + "${radarr_movie_year}" \ + "${radarr_movie_overview}" \ + "${radarr_download_client:-Unknown}" \ + "${radarr_moviefile_quality:-Unknown}" \ + "$(numfmt --to iec --format "%8.2f" "${radarr_release_size:-0}")" + printf -v PUSHOVER_URL \ + "%s/movie/%s" \ + "${radarr_applicationurl:-localhost}" "${radarr_movie_tmdbid}" + printf -v PUSHOVER_URL_TITLE \ + "View movie in %s" \ + "${radarr_instancename:-Radarr}" +fi + +if [[ "${radarr_eventtype:-}" == "ManualInteractionRequired" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Movie import requires intervention" + printf -v PUSHOVER_MESSAGE \ + "%s (%s)\nClient: %s" \ + "${radarr_movie_title}" \ + "${radarr_movie_year}" \ + "${radarr_download_client:-Unknown}" + printf -v PUSHOVER_URL \ + "%s/activity/queue" \ + "${radarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "View queue in %s" \ + "${radarr_instancename:-Radarr}" +fi + +json_data=$(jo \ + token="${PUSHOVER_TOKEN}" \ + user="${PUSHOVER_USER_KEY}" \ + title="${PUSHOVER_TITLE}" \ + message="${PUSHOVER_MESSAGE}" \ + url="${PUSHOVER_URL}" \ + url_title="${PUSHOVER_URL_TITLE}" \ + priority="${PUSHOVER_PRIORITY}" \ + html="1" +) + +status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "${json_data}" \ + "https://api.pushover.net/1/messages.json" \ +) + +printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "${status_code}" \ + "$(echo "${json_data}" | jq --compact-output)" >&2 diff --git a/kubernetes/main/apps/media/radarr/ks.yaml b/kubernetes/main/apps/media/radarr/ks.yaml new file mode 100644 index 000000000..f5fa638b1 --- /dev/null +++ b/kubernetes/main/apps/media/radarr/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app radarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/radarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /ping diff --git a/kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml b/kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml new file mode 100644 index 000000000..ce972da67 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/externalsecret.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret sabnzbd-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + 
target:
+    name: *secret
+    template:
+      engineVersion: v2
+      data:
+        CROSS_SEED_API_KEY: "{{ .CROSS_SEED_API_KEY }}"
+        PUSHOVER_TOKEN: "{{ .SABNZBD_PUSHOVER_TOKEN }}"
+        PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}"
+        SABNZBD__API_KEY: &apiKey "{{ .SABNZBD_API_KEY }}"
+        SABNZBD__NZB_KEY: *apiKey
+  dataFrom:
+    - extract:
+        key: secrets/api-keys
+    - extract:
+        key: secrets/pushover
diff --git a/kubernetes/main/apps/media/sabnzbd/app/gatus-config.yaml b/kubernetes/main/apps/media/sabnzbd/app/gatus-config.yaml
new file mode 100644
index 000000000..014e9dc9f
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/app/gatus-config.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "${APP}-gatus-ep"
+  labels:
+    gatus.io/enabled: "true"
+data:
+  config.yaml: |
+    endpoints:
+      - name: "${APP}"
+        group: guarded
+        url: "https://${APP}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}"
+        interval: 10m
+        ui:
+          hide-hostname: true
+          hide-url: true
+        client:
+          dns-resolver: tcp://172.17.0.10:53
+        conditions:
+          - "[STATUS] == 200"
+          - "has([BODY].version) == true"
+        alerts:
+          - type: pushover
diff --git a/kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml b/kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml
new file mode 100644
index 000000000..27775b5ed
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/app/helmrelease.yaml
@@ -0,0 +1,152 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: sabnzbd
+spec:
+  interval: 30m
+  chart:
+    spec:
+      verify:
+        provider: cosign
+      chart: app-template
+      version: 3.5.1
+      sourceRef:
+        kind: HelmRepository
+        name: bjw-s
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      strategy: rollback
+      retries: 3
+  dependsOn:
+    - name: volsync
+      namespace: volsync-system
+
+  values:
+    controllers:
+      sabnzbd:
+        annotations:
+          reloader.stakater.com/auto: "true"
+
+        containers:
+          app:
+            image:
+              repository: ghcr.io/onedr0p/sabnzbd
+              tag: 4.3.3@sha256:86c645db93affcbf01cc2bce2560082bfde791009e1506dba68269b9c50bc341
+            env:
+              TZ: Europe/Sofia
+              SABNZBD__PORT: &port 80
+              SABNZBD__HOST_WHITELIST_ENTRIES: >-
+                sabnzbd,
+                sabnzbd.media,
+                sabnzbd.media.svc,
+                sabnzbd.media.svc.cluster,
+                sabnzbd.media.svc.cluster.local,
+                sabnzbd.${PUBLIC_DOMAIN}
+              CROSS_SEED_HOST: cross-seed.media.svc.cluster.local
+              CROSS_SEED_ENABLED: true
+              CROSS_SEED_PORT: 80
+              CROSS_SEED_SLEEP_INTERVAL: 30
+              PUSHOVER_ENABLED: true
+            envFrom:
+              - secretRef:
+                  name: sabnzbd-secret
+            probes:
+              liveness: &probes
+                enabled: true
+                custom: true
+                spec:
+                  httpGet:
+                    path: /api?mode=version
+                    port: *port
+                  initialDelaySeconds: 0
+                  periodSeconds: 10
+                  timeoutSeconds: 1
+                  failureThreshold: 3
+              readiness: *probes
+            securityContext:
+              allowPrivilegeEscalation: false
+              readOnlyRootFilesystem: true
+              capabilities: { drop: ["ALL"] }
+            resources:
+              requests:
+                cpu: 100m
+                memory: 512Mi
+              limits:
+                memory: 4Gi
+
+    defaultPodOptions:
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 568
+        runAsGroup: 568
+        fsGroup: 568
+        fsGroupChangePolicy: OnRootMismatch
+        seccompProfile: { type: RuntimeDefault }
+
+    service:
+      app:
+        controller: sabnzbd
+        ports:
+          http:
+            port: *port
+
+    ingress:
+      app:
+        annotations:
+          external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN}
+          nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
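+          # oauth2-proxy fronts the UI; the auth-snippet below short-circuits API
+          # requests with a 2xx so download clients can keep using the API key.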
+          nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
+          nginx.ingress.kubernetes.io/auth-snippet: |
+            if ($request_uri ~* "(\/|\/[0-9]+\/)api(/|$|[?])") {
+              return 202;
+            }
+          nginx.ingress.kubernetes.io/configuration-snippet: |
+            proxy_set_header Accept-Encoding "";
+            sub_filter '' '';
+            sub_filter_once on;
+        className: internal
+        hosts:
+          - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}"
+            paths:
+              - path: /
+                service:
+                  identifier: app
+                  port: http
+
+    persistence:
+      config:
+        enabled: true
+        type: persistentVolumeClaim
+        size: 2Gi
+        accessMode: ReadWriteOnce
+
+      logs:
+        type: emptyDir
+        globalMounts:
+          - path: /config/logs
+
+      tmp:
+        type: emptyDir
+        medium: Memory
+
+      scripts:
+        type: configMap
+        name: sabnzbd-scripts
+        defaultMode: 0550
+        globalMounts:
+          - path: /scripts/post-process.sh
+            subPath: post-process.sh
+            readOnly: true
+
+      media:
+        type: nfs
+        server: nas.${PUBLIC_DOMAIN}
+        path: /mnt/exos20/data
+        globalMounts:
+          - path: /data/downloads/usenet
+            subPath: downloads/usenet
diff --git a/kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml b/kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml
new file mode 100644
index 000000000..f13c3c4d0
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/app/kustomization.yaml
@@ -0,0 +1,17 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./externalsecret.yaml
+  - ./helmrelease.yaml
+  - ./volsync.yaml
+  - ./gatus-config.yaml
+configMapGenerator:
+  - name: sabnzbd-scripts
+    files:
+      - post-process.sh=./resources/post-process.sh
+generatorOptions:
+  disableNameSuffixHash: true
+  annotations:
+    kustomize.toolkit.fluxcd.io/substitute: disabled
diff --git a/kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh b/kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh
new file mode 100644
index 000000000..b2fc9d6db
--- /dev/null
+++ b/kubernetes/main/apps/media/sabnzbd/app/resources/post-process.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+# shellcheck disable=SC2154
+
+set -euo pipefail
+
+# User-defined variables
+CROSS_SEED_ENABLED="${CROSS_SEED_ENABLED:-false}"
+CROSS_SEED_HOST="${CROSS_SEED_HOST:-required}"
+CROSS_SEED_PORT="${CROSS_SEED_PORT:-required}"
+CROSS_SEED_API_KEY="${CROSS_SEED_API_KEY:-required}"
+CROSS_SEED_SLEEP_INTERVAL="${CROSS_SEED_SLEEP_INTERVAL:-30}"
+PUSHOVER_ENABLED="${PUSHOVER_ENABLED:-false}"
+PUSHOVER_USER_KEY="${PUSHOVER_USER_KEY:-required}"
+PUSHOVER_TOKEN="${PUSHOVER_TOKEN:-required}"
+
+# Function to set release variables from SABnzbd
+set_sab_vars() {
+    RELEASE_NAME="${SAB_FILENAME:-}"
+    RELEASE_DIR="${SAB_COMPLETE_DIR:-}"
+    RELEASE_CAT="${SAB_CAT:-}"
+    RELEASE_SIZE="${SAB_BYTES:-}"
+    RELEASE_STATUS="${SAB_PP_STATUS:-}"
+    RELEASE_INDEXER="${SAB_URL:-}"
+    RELEASE_TYPE="NZB"
+}
+
+# Function to set release variables from qBittorrent
+set_qb_vars() {
+    RELEASE_NAME="$1"    # %N
+    RELEASE_DIR="$2"     # %F
+    RELEASE_CAT="$3"     # %L
+    RELEASE_SIZE="$4"    # %Z
+    RELEASE_INDEXER="$5" # %T
+    RELEASE_STATUS=0     # Always 0 for qBittorrent
+    RELEASE_TYPE="Torrent"
+}
+
+# Function to send pushover notification
+send_pushover_notification() {
+    local pushover_message status_code json_data
+    printf -v pushover_message \
+        "%s\nCategory: %s\nIndexer: %s\nSize: %s" \
+        "${RELEASE_NAME%.*}" \
+        "${RELEASE_CAT}" \
+        "$(trurl --url "${RELEASE_INDEXER}" --get '{idn:host}')" \
+        "$(numfmt --to iec --format "%8.2f" "${RELEASE_SIZE}")"
+
+    json_data=$(jo \
+        token="${PUSHOVER_TOKEN}" \
+        user="${PUSHOVER_USER_KEY}" \
title="${RELEASE_TYPE} Downloaded" \ + message="${pushover_message}" \ + priority="-2" \ + html="1" + ) + + status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "${json_data}" \ + "https://api.pushover.net/1/messages.json" + ) + + printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "${status_code}" \ + "$(echo "${json_data}" | jq --compact-output)" >&2 +} + +# Function to search for cross-seed +search_cross_seed() { + local status_code + status_code=$(curl \ + --silent \ + --output /dev/null \ + --write-out "%{http_code}" \ + --request POST \ + --data-urlencode "path=${RELEASE_DIR}" \ + --header "X-Api-Key: ${CROSS_SEED_API_KEY}" \ + "http://${CROSS_SEED_HOST}:${CROSS_SEED_PORT}/api/webhook" + ) + + printf "cross-seed search returned with HTTP status code %s and path %s\n" \ + "${status_code}" \ + "${RELEASE_DIR}" >&2 + + sleep "${CROSS_SEED_SLEEP_INTERVAL}" +} + +main() { + # Determine the source and set release variables accordingly + if env | grep -q "^SAB_"; then + set_sab_vars + else + set_qb_vars "$@" + fi + + # Check if post-processing was successful + if [[ "${RELEASE_STATUS}" -ne 0 ]]; then + printf "post-processing failed with sabnzbd status code %s\n" \ + "${RELEASE_STATUS}" >&2 + exit 1 + fi + + # Send pushover notification + if [[ "${PUSHOVER_ENABLED}" == "true" ]]; then + send_pushover_notification + fi + + # Search for cross-seed + if [[ "${CROSS_SEED_ENABLED}" == "true" ]]; then + search_cross_seed + fi +} + +main "$@" diff --git a/kubernetes/main/apps/media/sabnzbd/app/volsync.yaml b/kubernetes/main/apps/media/sabnzbd/app/volsync.yaml new file mode 100644 index 000000000..da4451987 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/app/volsync.yaml @@ -0,0 +1,47 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret sabnzbd-volsync-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + creationPolicy: Owner + template: + engineVersion: v2 + data: + RESTIC_REPOSITORY: '{{ .RESTIC_REPOSITORY }}/sabnzbd/volsync/config-volsync-config' + RESTIC_PASSWORD: '{{ .ENCRYPTION_KEY }}' + AWS_ACCESS_KEY_ID: '{{ .CF_ACCESS_KEY_ID }}' + AWS_SECRET_ACCESS_KEY: '{{ .CF_SECRET_ACCESS_KEY }}' + dataFrom: + - extract: + key: secrets/volsync + - extract: + key: secrets/cloudflare +--- +# yaml-language-server: $schema=https://kubernetes-schemas.ok8.sh/volsync.backube/replicationsource_v1alpha1.json +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationSource +metadata: + name: sabnzbd +spec: + sourcePVC: sabnzbd-config + trigger: + schedule: "0 7 * * *" + restic: + copyMethod: Snapshot + pruneIntervalDays: 7 + repository: sabnzbd-volsync-secret + cacheCapacity: 2Gi + moverSecurityContext: + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + retain: + daily: 7 + within: 3d diff --git a/kubernetes/main/apps/media/sabnzbd/ks.yaml b/kubernetes/main/apps/media/sabnzbd/ks.yaml new file mode 100644 index 000000000..a93524885 --- /dev/null +++ b/kubernetes/main/apps/media/sabnzbd/ks.yaml @@ -0,0 +1,26 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app sabnzbd + namespace: 
flux-system
+spec:
+  targetNamespace: media
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  dependsOn:
+    - name: external-secrets-stores
+  path: ./kubernetes/main/apps/media/sabnzbd/app
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: false
+  interval: 30m
+  timeout: 5m
+  postBuild:
+    substitute:
+      APP: *app
+      GATUS_PATH: /api?mode=version
diff --git a/kubernetes/main/apps/media/sonarr/app/externalsecret.yaml b/kubernetes/main/apps/media/sonarr/app/externalsecret.yaml
new file mode 100644
index 000000000..2b5fa75af
--- /dev/null
+++ b/kubernetes/main/apps/media/sonarr/app/externalsecret.yaml
@@ -0,0 +1,35 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: &secret sonarr-secret
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault-backend
+  target:
+    name: *secret
+    template:
+      engineVersion: v2
+      data:
+        SONARR__AUTH__APIKEY: "{{ .SONARR_API_KEY }}"
+        SONARR__POSTGRES__HOST: &dbHost postgres17-rw.database.svc.cluster.local
+        SONARR__POSTGRES__PORT: "5432"
+        SONARR__POSTGRES__USER: &dbUser "{{ .SONARR_POSTGRES_USER }}"
+        SONARR__POSTGRES__PASSWORD: &dbPass "{{ .SONARR_POSTGRES_PASS }}"
+        SONARR__POSTGRES__MAINDB: &dbName sonarr
+        PUSHOVER_TOKEN: "{{ .SONARR_PUSHOVER_TOKEN }}"
+        PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}"
+        INIT_POSTGRES_DBNAME: *dbName
+        INIT_POSTGRES_HOST: *dbHost
+        INIT_POSTGRES_USER: *dbUser
+        INIT_POSTGRES_PASS: *dbPass
+        INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}"
+  dataFrom:
+    - extract:
+        key: secrets/api-keys
+    - extract:
+        key: secrets/cloudnative-pg
+    - extract:
+        key: secrets/pushover
diff --git a/kubernetes/main/apps/media/sonarr/app/gatus-config.yaml b/kubernetes/main/apps/media/sonarr/app/gatus-config.yaml
new file mode 100644
index 000000000..1696e2fe6
--- /dev/null
+++ b/kubernetes/main/apps/media/sonarr/app/gatus-config.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "${APP}-gatus-ep"
+  labels:
+    gatus.io/enabled: "true"
+data:
+  config.yaml: |
+    endpoints:
+      - name: "${APP}"
+        group: guarded
+        url: "https://${APP}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}"
+        interval: 10m
+        ui:
+          hide-hostname: true
+          hide-url: true
+        client:
+          dns-resolver: tcp://172.17.0.10:53
+        conditions:
+          - "[STATUS] == 200"
+          - "[BODY].status == OK"
+        alerts:
+          - type: pushover
diff --git a/kubernetes/main/apps/media/sonarr/app/helmrelease.yaml b/kubernetes/main/apps/media/sonarr/app/helmrelease.yaml
new file mode 100644
index 000000000..ab86d73aa
--- /dev/null
+++ b/kubernetes/main/apps/media/sonarr/app/helmrelease.yaml
@@ -0,0 +1,155 @@
+---
+# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: sonarr
+spec:
+  interval: 30m
+  chart:
+    spec:
+      verify:
+        provider: cosign
+      chart: app-template
+      version: 3.5.1
+      sourceRef:
+        kind: HelmRepository
+        name: bjw-s
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      strategy: rollback
+      retries: 3
+
+  values:
+    controllers:
+      sonarr:
+        annotations:
+          reloader.stakater.com/auto: "true"
+
+        initContainers:
+          init-db:
+            image:
+              repository: ghcr.io/onedr0p/postgres-init
+              tag: 16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f
+            envFrom: &envFrom
+              - 
secretRef: + name: sonarr-secret + + containers: + app: + image: + repository: ghcr.io/onedr0p/sonarr-develop + tag: 4.0.10.2656@sha256:4eb7790aeed145b5495951bd5e68a67c2f04693bfd3c9916ffd4667d4c8b8400 + env: + SONARR__APP__INSTANCENAME: Sonarr + SONARR__APP__THEME: dark + SONARR__AUTH__METHOD: External + SONARR__AUTH__REQUIRED: DisabledForLocalAddresses + SONARR__LOG__DBENABLED: "False" + SONARR__LOG__LEVEL: info + SONARR__SERVER__PORT: &port 80 + SONARR__UPDATE__BRANCH: develop + TZ: Europe/Sofia + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ping + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + memory: 4Gi + + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 568 + runAsGroup: 568 + fsGroup: 568 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: [10000] + seccompProfile: { type: RuntimeDefault } + + service: + app: + controller: sonarr + ports: + http: + port: *port + + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + if ($request_uri ~* "(\/|\/[0-9]+\/)api(/|$|[?])") { + return 200; + } + if ($request_uri ~* "^/ping") { + return 200; + } + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + className: internal + hosts: + - host: "{{ .Release.Name }}.${PUBLIC_DOMAIN}" + paths: + - path: / + service: + identifier: app + port: http + + persistence: + config: + enabled: true + type: persistentVolumeClaim + size: 15Gi + accessMode: ReadWriteOnce + globalMounts: + - path: /config + + scripts: + type: configMap + name: sonarr-configmap + defaultMode: 0775 + globalMounts: + - path: /scripts/pushover-notify.sh + subPath: pushover-notify.sh + readOnly: true + - path: /scripts/refresh-series.sh + subPath: refresh-series.sh + readOnly: true + tmp: + type: emptyDir + medium: Memory + + media: + type: nfs + server: nas.${PUBLIC_DOMAIN} + path: /mnt/exos20/data + globalMounts: + - path: /data diff --git a/kubernetes/main/apps/media/sonarr/app/kustomization.yaml b/kubernetes/main/apps/media/sonarr/app/kustomization.yaml new file mode 100644 index 000000000..6f3004737 --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/kustomization.yaml @@ -0,0 +1,17 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./gatus-config.yaml +configMapGenerator: + - name: sonarr-configmap + files: + - pushover-notify.sh=./resources/pushover-notify.sh + - refresh-series.sh=./resources/refresh-series.sh +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled diff --git a/kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh b/kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh new file mode 100644 index 000000000..384235269 --- /dev/null +++ 
b/kubernetes/main/apps/media/sonarr/app/resources/pushover-notify.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +set -euo pipefail + +# User defined variables for pushover +PUSHOVER_USER_KEY="${PUSHOVER_USER_KEY:-required}" +PUSHOVER_TOKEN="${PUSHOVER_TOKEN:-required}" +PUSHOVER_PRIORITY="${PUSHOVER_PRIORITY:-"-2"}" + +if [[ "${sonarr_eventtype:-}" == "Test" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Test Notification" + printf -v PUSHOVER_MESSAGE \ + "Howdy this is a test notification from %s" \ + "${sonarr_instancename:-Sonarr}" + printf -v PUSHOVER_URL \ + "%s" \ + "${sonarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "Open %s" \ + "${sonarr_instancename:-Sonarr}" +fi + +if [[ "${sonarr_eventtype:-}" == "Download" ]]; then + printf -v PUSHOVER_TITLE \ + "Episode %s" \ + "$( [[ "${sonarr_isupgrade}" == "True" ]] && echo "Upgraded" || echo "Downloaded" )" + printf -v PUSHOVER_MESSAGE \ + "%s (S%02dE%02d)\n%s\n\nQuality: %s\nClient: %s" \ + "${sonarr_series_title}" \ + "${sonarr_episodefile_seasonnumber}" \ + "${sonarr_episodefile_episodenumbers}" \ + "${sonarr_episodefile_episodetitles}" \ + "${sonarr_episodefile_quality:-Unknown}" \ + "${sonarr_download_client:-Unknown}" + printf -v PUSHOVER_URL \ + "%s/series/%s" \ + "${sonarr_applicationurl:-localhost}" \ + "${sonarr_series_titleslug}" + printf -v PUSHOVER_URL_TITLE \ + "View series in %s" \ + "${sonarr_instancename:-Sonarr}" +fi + +if [[ "${sonarr_eventtype:-}" == "ManualInteractionRequired" ]]; then + PUSHOVER_PRIORITY="1" + printf -v PUSHOVER_TITLE \ + "Episode import requires intervention" + printf -v PUSHOVER_MESSAGE \ + "%s\nClient: %s" \ + "${sonarr_series_title}" \ + "${sonarr_download_client:-Unknown}" + printf -v PUSHOVER_URL \ + "%s/activity/queue" \ + "${sonarr_applicationurl:-localhost}" + printf -v PUSHOVER_URL_TITLE \ + "View queue in %s" \ + "${sonarr_instancename:-Sonarr}" +fi + +json_data=$(jo \ + token="${PUSHOVER_TOKEN}" \ + user="${PUSHOVER_USER_KEY}" \ + title="${PUSHOVER_TITLE}" \ + message="${PUSHOVER_MESSAGE}" \ + url="${PUSHOVER_URL}" \ + url_title="${PUSHOVER_URL_TITLE}" \ + priority="${PUSHOVER_PRIORITY}" \ + html="1" +) + +status_code=$(curl \ + --silent \ + --write-out "%{http_code}" \ + --output /dev/null \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary "${json_data}" \ + "https://api.pushover.net/1/messages.json" \ +) + +printf "pushover notification returned with HTTP status code %s and payload: %s\n" \ + "${status_code}" \ + "$(echo "${json_data}" | jq --compact-output)" >&2 diff --git a/kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh b/kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh new file mode 100644 index 000000000..be570f166 --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/app/resources/refresh-series.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2154 +set -euo pipefail + +CURL_CMD=(curl -fsSL --header "X-Api-Key: ${SONARR__AUTH__APIKEY:-}") +SONARR_API_URL="http://localhost:${SONARR__SERVER__PORT:-}/api/v3" + +if [[ "${sonarr_eventtype:-}" == "Grab" ]]; then + tba=$("${CURL_CMD[@]}" "${SONARR_API_URL}/episode?seriesId=${sonarr_series_id:-}" | jq --raw-output ' + [.[] | select((.title == "TBA") or (.title == "TBD"))] | length + ') + + if (( tba > 0 )); then + echo "INFO: Refreshing series ${sonarr_series_id:-} due to TBA/TBD episodes found" + "${CURL_CMD[@]}" \ + --request POST \ + --header "Content-Type: application/json" \ + --data-binary 
'{"name": "RefreshSeries", "seriesId": '"${sonarr_series_id:-}"'}' \ + "${SONARR_API_URL}/command" &>/dev/null + fi +fi diff --git a/kubernetes/main/apps/media/sonarr/ks.yaml b/kubernetes/main/apps/media/sonarr/ks.yaml new file mode 100644 index 000000000..61666ca54 --- /dev/null +++ b/kubernetes/main/apps/media/sonarr/ks.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app sonarr + namespace: flux-system +spec: + targetNamespace: media + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/media/sonarr/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /ping + diff --git a/kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml b/kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml new file mode 100644 index 000000000..7bc6e3f3c --- /dev/null +++ b/kubernetes/main/apps/media/unpackerr/app/externalsecret.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret unpackerr-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + UN_RADARR_0_API_KEY: "{{ .RADARR_API_KEY }}" + UN_SONARR_0_API_KEY: "{{ .SONARR_API_KEY }}" + dataFrom: + - extract: + key: secrets/api-keys diff --git a/kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml b/kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml new file mode 100644 index 000000000..c82af8bff --- /dev/null +++ b/kubernetes/main/apps/media/unpackerr/app/helmrelease.yaml @@ -0,0 +1,103 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app unpackerr +spec: + interval: 30m + chart: + spec: + verify: + provider: cosign + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + + values: + controllers: + unpackerr: + annotations: + reloader.stakater.com/auto: "true" + + containers: + app: + image: + repository: ghcr.io/unpackerr/unpackerr + tag: 0.14.5@sha256:dc72256942ce50d1c8a1aeb5aa85b6ae2680a36eefd2182129d8d210fce78044 + env: + TZ: Europe/Sofia + UN_WEBSERVER_METRICS: true + UN_WEBSERVER_LOG_FILE: /logs/webserver.log + UN_ACTIVITY: true + UN_SONARR_0_URL: https://sonarr.${PUBLIC_DOMAIN} + UN_SONARR_0_PATHS_0: /data/downloads/torrents/complete/tv + UN_RADARR_0_URL: https://radarr.${PUBLIC_DOMAIN} + UN_RADARR_0_PATHS_0: /data/downloads/torrents/complete/movies + envFrom: + - secretRef: + name: unpackerr-secret + probes: + liveness: + enabled: true + readiness: + enabled: true + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 4Gi + + defaultPodOptions: + securityContext: + 
runAsNonRoot: true
+        runAsUser: 568
+        runAsGroup: 568
+        fsGroup: 568
+        fsGroupChangePolicy: OnRootMismatch
+        supplementalGroups: [10000]
+        seccompProfile: { type: RuntimeDefault }
+
+    service:
+      app:
+        controller: *app
+        ports:
+          http:
+            port: 5656
+
+    serviceMonitor:
+      app:
+        serviceName: *app
+        endpoints:
+          - port: http
+            scheme: http
+            path: /metrics
+            interval: 1m
+            scrapeTimeout: 10s
+
+    persistence:
+      logs:
+        type: emptyDir
+
+      media:
+        type: nfs
+        server: nas.${PUBLIC_DOMAIN}
+        path: /mnt/exos20/data
+        globalMounts:
+          - path: /data/downloads/torrents/complete
+            subPath: downloads/torrents/complete
diff --git a/kubernetes/main/apps/media/unpackerr/app/kustomization.yaml b/kubernetes/main/apps/media/unpackerr/app/kustomization.yaml
new file mode 100644
index 000000000..4eed917b9
--- /dev/null
+++ b/kubernetes/main/apps/media/unpackerr/app/kustomization.yaml
@@ -0,0 +1,7 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./externalsecret.yaml
+  - ./helmrelease.yaml
diff --git a/kubernetes/main/apps/media/unpackerr/ks.yaml b/kubernetes/main/apps/media/unpackerr/ks.yaml
new file mode 100644
index 000000000..cc1b2c96f
--- /dev/null
+++ b/kubernetes/main/apps/media/unpackerr/ks.yaml
@@ -0,0 +1,22 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app unpackerr
+  namespace: flux-system
+spec:
+  targetNamespace: media
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  dependsOn:
+    - name: external-secrets-stores
+  path: ./kubernetes/main/apps/media/unpackerr/app
+  prune: true
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: false
+  interval: 30m
+  timeout: 5m
diff --git a/kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml b/kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml
new file mode 100644
index 000000000..df84ec138
--- /dev/null
+++ b/kubernetes/main/apps/network/cloudflared/app/dnsendpoint.yaml
@@ -0,0 +1,11 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/externaldns.k8s.io/dnsendpoint_v1alpha1.json
+apiVersion: externaldns.k8s.io/v1alpha1
+kind: DNSEndpoint
+metadata:
+  name: cloudflared
+spec:
+  endpoints:
+    - dnsName: external.${PUBLIC_DOMAIN}
+      recordType: CNAME
+      targets: ["${CLUSTER_CLOUDFLARE_TUNNEL_ID}.cfargotunnel.com"]
diff --git a/kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml b/kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml
new file mode 100644
index 000000000..3e3ca120b
--- /dev/null
+++ b/kubernetes/main/apps/network/cloudflared/app/externalsecret.yaml
@@ -0,0 +1,24 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: cloudflared-tunnel
+spec:
+  secretStoreRef:
+    kind: ClusterSecretStore
+    name: vault-backend
+  target:
+    name: cloudflared-tunnel-secret
+    template:
+      engineVersion: v2
+      data:
+        credentials.json: |
+          {
+            "AccountTag": "{{ .CF_ACCOUNT_TAG }}",
+            "TunnelSecret": "{{ .CF_TUNNEL_SECRET }}",
+            "TunnelID": "{{ .CF_TUNNEL_ID }}"
+          }
+  dataFrom:
+    - extract:
+        key: secrets/cloudflare
diff --git a/kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml b/kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml
new file mode 
100644 index 000000000..5a0724644 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/helmrelease.yaml @@ -0,0 +1,117 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app cloudflared +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: nginx-external + namespace: network + values: + controllers: + cloudflared: + replicas: 2 + strategy: RollingUpdate + annotations: + reloader.stakater.com/auto: "true" + containers: + app: + image: + repository: docker.io/cloudflare/cloudflared + tag: 2024.11.0@sha256:2c78df02e1f23ab19d4c636921f05b9ebec163b887e946f98e22e56254a5540f + env: + NO_AUTOUPDATE: true + TUNNEL_CRED_FILE: /etc/cloudflared/creds/credentials.json + TUNNEL_METRICS: 0.0.0.0:8080 + TUNNEL_ORIGIN_ENABLE_HTTP2: true + TUNNEL_TRANSPORT_PROTOCOL: quic + TUNNEL_POST_QUANTUM: true + args: + - tunnel + - --config + - /etc/cloudflared/config/config.yaml + - run + - "${CLUSTER_CLOUDFLARE_TUNNEL_ID}" + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /ready + port: &port 8080 + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: + requests: + cpu: 10m + limits: + memory: 256Mi + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + service: + app: + controller: cloudflared + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: cloudflared + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + persistence: + config: + type: configMap + name: cloudflared-configmap + globalMounts: + - path: /etc/cloudflared/config/config.yaml + subPath: config.yaml + readOnly: true + creds: + type: secret + name: cloudflared-tunnel-secret + globalMounts: + - path: /etc/cloudflared/creds/credentials.json + subPath: credentials.json + readOnly: true diff --git a/kubernetes/main/apps/network/cloudflared/app/kustomization.yaml b/kubernetes/main/apps/network/cloudflared/app/kustomization.yaml new file mode 100644 index 000000000..86de1bda9 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./dnsendpoint.yaml + - ./helmrelease.yaml +configMapGenerator: + - name: cloudflared-configmap + files: + - config.yaml=./resources/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/network/cloudflared/app/resources/config.yaml b/kubernetes/main/apps/network/cloudflared/app/resources/config.yaml new file mode 100644 index 000000000..156318af7 --- /dev/null +++ 
b/kubernetes/main/apps/network/cloudflared/app/resources/config.yaml @@ -0,0 +1,10 @@ +--- +originRequest: + originServerName: external.${PUBLIC_DOMAIN} + +ingress: + - hostname: ${PUBLIC_DOMAIN} + service: https://nginx-external-controller.network.svc.cluster.local:443 + - hostname: "*.${PUBLIC_DOMAIN}" + service: https://nginx-external-controller.network.svc.cluster.local:443 + - service: http_status:404 diff --git a/kubernetes/main/apps/network/cloudflared/ks.yaml b/kubernetes/main/apps/network/cloudflared/ks.yaml new file mode 100644 index 000000000..0e94bf167 --- /dev/null +++ b/kubernetes/main/apps/network/cloudflared/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app cloudflared + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-dns-cloudflare + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/cloudflared/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml b/kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml new file mode 100644 index 000000000..d15cce515 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/externalsecret.yaml @@ -0,0 +1,105 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret crowdsec-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + INIT_POSTGRES_DBNAME: crowdsec + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .CROWDSEC_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .CROWDSEC_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + ENROLL_KEY: "{{ .ENROLL_KEY }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/crowdsec +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret crowdsec-config +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + http.yaml: | + ######################################################################### + # Title: CrowdSec : Pushover Notification (API) # + # Author(s): JigSawFr # + # URL: https://github.com/crowdsecurity/crowdsec # + ######################################################################### + # MIT License # + ######################################################################### + + type: http + name: pushover + + # One of "trace", "debug", "info", "warn", "error", "off" + log_level: info + + # group_wait: "30s" # Time to wait collecting alerts before relaying a message to this plugin, eg "30s" + # group_threshold: 10 # Amount of alerts that triggers a message before has expired, eg "10" + # max_retry: 5 # Number of attempts to relay messages to plugins in case of error + # timeout: "10s" # Time to wait for response from the plugin before considering the attempt a 
failure, eg "10s" + + #------------------------- + # plugin-specific options + + # The following template receives a list of models.Alert objects + # The output goes in the http request body + format: | + { + "token": "{{ .CROWDSEC_PUSHOVER_TOKEN }}", + "user": "{{ .PUSHOVER_USER_KEY }}", + "message": "{{`{{range . -}}{{$alert := . -}}{{range .Decisions -}}{{.Value}} will get {{.Type}} for the next {{.Duration}} for triggering {{.Scenario}}.\r\n https://www.shodan.io/host/{{.Value}}{{end -}}{{end -}}`}}", + "html": "1", + "title": "Scenario triggered on IDS/IPS !" + } + url: https://api.pushover.net/1/messages.json + method: POST + headers: + Content-Type: "application/json" + profiles.yaml: | + name: default_ip_remediation + #debug: true + filters: + - Alert.Remediation == true && Alert.GetScope() == "Ip" + decisions: + - type: ban + duration: 4h + duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) + notifications: + - pushover + on_success: break + --- + name: default_range_remediation + #debug: true + filters: + - Alert.Remediation == true && Alert.GetScope() == "Range" + decisions: + - type: ban + duration: 4h + duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) + notifications: + - pushover + on_success: break + dataFrom: + - extract: + key: secrets/pushover diff --git a/kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml b/kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml new file mode 100644 index 000000000..47aae03c6 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/helmrelease.yaml @@ -0,0 +1,112 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: crowdsec +spec: + interval: 30m + chart: + spec: + chart: crowdsec + version: 0.13.0 + sourceRef: + kind: HelmRepository + name: crowdsec + namespace: flux-system + maxHistory: 3 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + container_runtime: containerd + agent: + wait_for_lapi: + image: + tag: "1.37.0@sha256:768e5c6f5cb6db0794eec98dc7a967f40631746c32232b78a3105fb946f3ab83" + acquisition: + - namespace: network + podName: nginx-external-controller-* + program: nginx + - namespace: media + podName: jellyseerr-* + program: jellyseerr + - namespace: vaultwarden + podName: vaultwarden-* + program: VAULTWARDEN + - namespace: idp + podName: keycloak-0 + program: keycloak + env: + - name: PARSERS + value: "crowdsecurity/cri-logs crowdsecurity/whitelists crowdsecurity/geoip-enrich" + - name: COLLECTIONS + value: "crowdsecurity/modsecurity inherent-io/keycloak LePresidente/jellyseerr crowdsecurity/nginx Dominic-Wagner/vaultwarden crowdsecurity/base-http-scenarios crowdsecurity/http-cve crowdsecurity/http-dos crowdsecurity/whitelist-good-actors" + metrics: + enabled: true + serviceMonitor: + enabled: true + persistentVolume: + config: + enabled: true + lapi: + extraVolumes: + - name: custom-configs + secret: + secretName: crowdsec-config + extraVolumeMounts: + - name: custom-configs + mountPath: /etc/crowdsec_data/notifications/http.yaml + subPath: http.yaml + - name: custom-configs + mountPath: /etc/crowdsec_data/profiles.yaml + subPath: profiles.yaml + deployAnnotations: + reloader.stakater.com/auto: "true" + extraInitContainers: + - name: initdb + image: 
"ghcr.io/onedr0p/postgres-init:16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f" + imagePullPolicy: IfNotPresent + envFrom: + - secretRef: + name: crowdsec-secret + env: + # For an internal test, disable the Online API + - name: DISABLE_ONLINE_API + value: "false" + - name: ENROLL_KEY + valueFrom: + secretKeyRef: + name: &secret crowdsec-secret + key: ENROLL_KEY + - name: ENROLL_INSTANCE_NAME + value: "cluster" + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_PASS + - name: DB_USER + valueFrom: + secretKeyRef: + name: *secret + key: INIT_POSTGRES_USER + metrics: + enabled: true + serviceMonitor: + enabled: true + config: + config.yaml.local: | + db_config: + type: postgresql + user: $${DB_USER} + password: $${DB_PASSWORD} + db_name: crowdsec + host: postgres17-rw.database + port: 5432 + sslmode: require diff --git a/kubernetes/main/apps/network/crowdsec/app/kustomization.yaml b/kubernetes/main/apps/network/crowdsec/app/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/crowdsec/ks.yaml b/kubernetes/main/apps/network/crowdsec/ks.yaml new file mode 100644 index 000000000..5c9006f49 --- /dev/null +++ b/kubernetes/main/apps/network/crowdsec/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app crowdsec + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + path: ./kubernetes/main/apps/network/crowdsec/app + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/external-dns/RFC3645/config.yaml b/kubernetes/main/apps/network/external-dns/RFC3645/config.yaml new file mode 100644 index 000000000..90d3507dd --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/RFC3645/config.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: krb5.conf +data: + krb5.conf: | + [logging] + default = FILE:/var/log/krb5libs.log + kdc = FILE:/var/log/krb5kdc.log + admin_server = FILE:/var/log/kadmind.log + + [libdefaults] + dns_lookup_realm = false + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt + default_ccache_name = KEYRING:persistent:%{uid} + + default_realm = ${AD_REALM} + + [realms] + ${AD_REALM} = { + kdc = dc01.${PUBLIC_DOMAIN} + admin_server = dc01.${PUBLIC_DOMAIN} + } + + [domain_realm] + ${PUBLIC_DOMAIN} = ${AD_REALM} + .${PUBLIC_DOMAIN} = ${AD_REALM} diff --git a/kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml b/kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml new file mode 100644 index 000000000..7c7ba9fd8 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/RFC3645/helmrelease.yaml @@ -0,0 +1,57 @@ +--- +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kubernetes-sigs/external-dns/refs/heads/master/charts/external-dns/values.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app external-dns-bind +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.15.0 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: *app + provider: + name: rfc2136 + extraArgs: + - --events + - --ignore-ingress-tls-spec + - --rfc2136-gss-tsig + - --rfc2136-host=dc01.${PUBLIC_DOMAIN} + - --rfc2136-port=53 + - --rfc2136-zone=${PUBLIC_DOMAIN} + - --rfc2136-kerberos-username=externaldns_service + - --rfc2136-kerberos-password=${KERBEROS_PASSWORD} + - --rfc2136-kerberos-realm=${AD_REALM} + - --rfc2136-tsig-axfr + policy: sync + sources: ["ingress", "service"] + txtOwnerId: default + txtPrefix: k8s. + domainFilters: ["${PUBLIC_DOMAIN}"] + serviceMonitor: + enabled: true + extraVolumes: + - configMap: + defaultMode: 420 + name: krb5.conf + name: kerberos-config-volume + extraVolumeMounts: + - mountPath: /etc/krb5.conf + name: kerberos-config-volume + subPath: krb5.conf diff --git a/kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml b/kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml new file mode 100644 index 000000000..3ea08d247 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/RFC3645/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./config.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml b/kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml new file mode 100644 index 000000000..948a62faa --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/cloudflare/externalsecret.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: external-dns-cloudflare +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: external-dns-cloudflare-secret + template: + engineVersion: v2 + data: + CF_API_EMAIL: "{{ .CF_API_EMAIL }}" + CF_API_TOKEN: "{{ .CF_API_TOKEN }}" + dataFrom: + - extract: + key: secrets/cloudflare diff --git a/kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml b/kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml new file mode 100644 index 000000000..8570ea084 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/cloudflare/helmrelease.yaml @@ -0,0 +1,58 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app external-dns-cloudflare +spec: + interval: 30m + chart: + spec: + chart: external-dns + version: 1.15.0 + sourceRef: + kind: HelmRepository + name: external-dns + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + 
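+    # Cloudflare credentials are injected as env vars from the ExternalSecret
+    # above rather than being set inline in the chart values.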
fullnameOverride: *app + provider: + name: cloudflare + env: + - name: &name CF_API_EMAIL + valueFrom: + secretKeyRef: + name: &secret external-dns-cloudflare-secret + key: *name + - name: &name CF_API_TOKEN + valueFrom: + secretKeyRef: + name: *secret + key: *name + extraArgs: + - --cloudflare-dns-records-per-page=1000 + - --cloudflare-proxied + - --crd-source-apiversion=externaldns.k8s.io/v1alpha1 + - --crd-source-kind=DNSEndpoint + - --events + - --ignore-ingress-tls-spec + - --ingress-class=external + policy: sync + sources: ["crd", "ingress"] + txtOwnerId: default + txtPrefix: k8s. + domainFilters: ["${PUBLIC_DOMAIN}"] + serviceMonitor: + enabled: true + podAnnotations: + secret.reloader.stakater.com/reload: *secret diff --git a/kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml b/kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/cloudflare/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/external-dns/ks.yaml b/kubernetes/main/apps/network/external-dns/ks.yaml new file mode 100644 index 000000000..cd9526cfe --- /dev/null +++ b/kubernetes/main/apps/network/external-dns/ks.yaml @@ -0,0 +1,44 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns-cloudflare + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/external-dns/cloudflare + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app external-dns-rfc2136 + namespace: flux-system +spec: + targetNamespace: network + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/network/external-dns/RFC3645 + prune: false + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/kustomization.yaml b/kubernetes/main/apps/network/kustomization.yaml new file mode 100644 index 000000000..96b348275 --- /dev/null +++ b/kubernetes/main/apps/network/kustomization.yaml @@ -0,0 +1,12 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./cloudflared/ks.yaml + - ./external-dns/ks.yaml + - ./crowdsec/ks.yaml + - ./nginx/ks.yaml diff --git a/kubernetes/main/apps/network/namespace.yaml b/kubernetes/main/apps/network/namespace.yaml new file mode 100644 index 000000000..356e3dc5a --- /dev/null +++ b/kubernetes/main/apps/network/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + annotations: + 
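+    # prune disabled: Flux will never garbage-collect this namespace, even if it
+    # drops out of the Kustomization inventory.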
kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: network +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: network +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/network/nginx/external/externalsecret.yaml b/kubernetes/main/apps/network/nginx/external/externalsecret.yaml new file mode 100644 index 000000000..d46abf55e --- /dev/null +++ b/kubernetes/main/apps/network/nginx/external/externalsecret.yaml @@ -0,0 +1,67 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret nginx-external-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + MAXMIND_LICENSE_KEY: "{{ .MAXMIND_LICENSE_KEY }}" + dataFrom: + - extract: + key: secrets/maxmind + - extract: + key: secrets/api-keys +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret crowdsec-bouncer-config +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + crowdsec-bouncer.conf: | + ENABLED=true + API_URL=http://crowdsec-service.network.svc.cluster.local:8080 + API_KEY={{ .BOUNCER_API_KEY }} + BOUNCING_ON_TYPE=all + FALLBACK_REMEDIATION=ban + REQUEST_TIMEOUT=1000 + UPDATE_FREQUENCY=60 + MODE=stream + BAN_TEMPLATE_PATH=/etc/nginx/lua/plugins/crowdsec/templates/ban.html + RET_CODE=403 + dataFrom: + - extract: + key: secrets/crowdsec +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret nginx-external-dhparam +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + data: + - secretKey: dhparam.pem + remoteRef: + key: secrets/nginx-external + property: dhparam.pem diff --git a/kubernetes/main/apps/network/nginx/external/helmrelease.yaml b/kubernetes/main/apps/network/nginx/external/helmrelease.yaml new file mode 100644 index 000000000..2d5d66932 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/external/helmrelease.yaml @@ -0,0 +1,179 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: nginx-external +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 
4.11.3
+      sourceRef:
+        kind: HelmRepository
+        name: ingress-nginx
+        namespace: flux-system
+  install:
+    remediation:
+      retries: 3
+  upgrade:
+    cleanupOnFail: true
+    remediation:
+      strategy: rollback
+      retries: 3
+  valuesFrom:
+    - targetPath: controller.maxmindLicenseKey
+      kind: Secret
+      name: nginx-external-secret
+      valuesKey: MAXMIND_LICENSE_KEY
+  values:
+    fullnameOverride: nginx-external
+    controller:
+      annotations:
+        reloader.stakater.com/auto: "true"
+      podAnnotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "10254"
+      extraVolumes:
+        - name: crowdsec-bouncer-plugin
+          emptyDir: {}
+        - name: crowdsec-bouncer-config
+          secret:
+            secretName: crowdsec-bouncer-config
+            defaultMode: 292
+        - name: owasp-crs
+          persistentVolumeClaim:
+            claimName: nfs-pvc
+        - name: modsecurity-config
+          configMap:
+            name: modsecurity-config
+      extraInitContainers:
+        - name: init-clone-crowdsec-bouncer
+          image: crowdsecurity/lua-bouncer-plugin:v1.0.5@sha256:90f5c611bebbbe89b8aef3218dad1df3bd9fbe51554024384b56026c69c55925
+          imagePullPolicy: IfNotPresent
+          command: ['sh', '-c', "sh /docker_start.sh; mkdir -p /lua_plugins/crowdsec/; cp -R /crowdsec/* /lua_plugins/crowdsec/"]
+          env:
+            - name: BOUNCER_CONFIG
+              value: "/crowdsec/crowdsec-bouncer.conf"
+          securityContext:
+            capabilities:
+              drop:
+                - ALL
+            runAsUser: 10001
+            runAsGroup: 10001
+            privileged: false
+            allowPrivilegeEscalation: false
+            runAsNonRoot: true
+            seccompProfile:
+              type: RuntimeDefault
+          volumeMounts:
+            - name: crowdsec-bouncer-plugin
+              mountPath: /lua_plugins
+      extraVolumeMounts:
+        - name: crowdsec-bouncer-plugin
+          mountPath: /etc/nginx/lua/plugins/crowdsec
+          subPath: crowdsec
+          readOnly: true
+        - name: crowdsec-bouncer-config
+          mountPath: /etc/nginx/lua/plugins/crowdsec/crowdsec-bouncer.conf
+          subPath: crowdsec-bouncer.conf
+          readOnly: true
+        - mountPath: /etc/nginx/owasp-modsecurity-crs
+          name: owasp-crs
+          readOnly: false
+        - name: modsecurity-config
+          mountPath: /etc/nginx/modsecurity/modsecurity.conf
+          subPath: modsecurity.conf
+          readOnly: true
+        #- name: modsecurity-config
+        #  mountPath: /etc/nginx/template/nginx.tmpl
+        #  subPath: nginx.tmpl
+        #  readOnly: false
+      replicaCount: 2
+      service:
+        annotations:
+          external-dns.alpha.kubernetes.io/hostname: external.${PUBLIC_DOMAIN}
+          lbipam.cilium.io/ips: 192.168.91.95
+      ingressClassResource:
+        name: external
+        default: false
+        controllerValue: k8s.io/external
+      admissionWebhooks:
+        objectSelector:
+          matchExpressions:
+            - key: ingress-class
+              operator: In
+              values: ["external"]
+      allowSnippetAnnotations: true
+      config:
+        add-headers: network/custom-headers
+        custom-http-errors: "504,503,502,501,500,429,410,404,403,400"
+        plugins: "crowdsec"
+        lua-shared-dicts: "crowdsec_cache: 50m"
+        block-user-agents: "GPTBot,~*GPTBot*,ChatGPT-User,~*ChatGPT-User*,Google-Extended,~*Google-Extended*,CCBot,~*CCBot*,Omgilibot,~*Omgilibot*,FacebookBot,~*FacebookBot*" # taken from https://github.com/superseriousbusiness/gotosocial/blob/main/internal/web/robots.go
+        client-body-buffer-size: 100M
+        client-body-timeout: 120
+        client-header-timeout: 120
+        enable-brotli: "true"
+        enable-ocsp: "true"
+        enable-real-ip: "true"
+        hide-headers: Server,X-Powered-By
+        hsts-max-age: 31536000
+        force-ssl-redirect: "true"
+        keep-alive-requests: 10000
+        keep-alive: 10
+        log-format-escape-json: "true"
+        log-format-upstream: >
+          {"time": "$time_iso8601",
+          "remote_addr": "$remote_addr",
+          "proxy_protocol_addr": "$proxy_protocol_addr",
+          "x_forwarded_for": "$proxy_add_x_forwarded_for",
+          "request_id": "$req_id",
+          "remote_user": "$remote_user",
"bytes_sent": "$bytes_sent", + "request_time": "$request_time", + "status": "$status", + "vhost": "$host", + "request_proto": "$server_protocol", + "path": "$uri", + "request_query": "$args", + "request_length": "$request_length", + "duration": "$request_time", + "method": "$request_method", + "http_referrer": "$http_referer", + "http_user_agent": "$http_user_agent"} + proxy-body-size: 0 + proxy-buffer-size: 16k + ssl-protocols: TLSv1.3 + ssl-ciphers: EECDH+AESGCM:EDH+AESGCM + ssl-dh-param: network/nginx-external-dhparam + use-geoip2: "false" + use-forwarded-headers: "true" + metrics: + enabled: true + serviceMonitor: + enabled: true + namespaceSelector: + any: true + extraArgs: + default-ssl-certificate: network/darkfellanet-tls + terminationGracePeriodSeconds: 60 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: nginx-external + app.kubernetes.io/component: controller + resources: + requests: + cpu: 100m + limits: + memory: 2000Mi + defaultBackend: + enabled: true + image: + registry: ghcr.io + image: darkfella91/custom-error-pages + tag: v1.0.2@sha256:8d5310c797a03e7ce894e6e6188c7caf83f37c469d02cb96812b9080bc23d584 diff --git a/kubernetes/main/apps/network/nginx/external/kustomization.yaml b/kubernetes/main/apps/network/nginx/external/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/external/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/network/nginx/internal/helmrelease.yaml b/kubernetes/main/apps/network/nginx/internal/helmrelease.yaml new file mode 100644 index 000000000..32a953cbf --- /dev/null +++ b/kubernetes/main/apps/network/nginx/internal/helmrelease.yaml @@ -0,0 +1,92 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: nginx-internal +spec: + interval: 30m + chart: + spec: + chart: ingress-nginx + version: 4.11.3 + sourceRef: + kind: HelmRepository + name: ingress-nginx + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + fullnameOverride: nginx-internal + controller: + replicaCount: 2 + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: internal.${PUBLIC_DOMAIN} + lbipam.cilium.io/ips: 192.168.91.97 + ingressClassResource: + name: internal + default: true + controllerValue: k8s.io/internal + admissionWebhooks: + objectSelector: + matchExpressions: + - key: ingress-class + operator: In + values: ["internal"] + allowSnippetAnnotations: true + config: + block-user-agents: "GPTBot,~*GPTBot*,ChatGPT-User,~*ChatGPT-User*,Google-Extended,~*Google-Extended*,CCBot,~*CCBot*,Omgilibot,~*Omgilibot*,FacebookBot,~*FacebookBot*" # taken from https://github.com/superseriousbusiness/gotosocial/blob/main/internal/web/robots.go + client-body-buffer-size: 100M + client-body-timeout: 120 + client-header-timeout: 120 + enable-brotli: "false" + enable-ocsp: "true" + enable-real-ip: "true" + force-ssl-redirect: "true" + hide-headers: Server,X-Powered-By + hsts-max-age: 
31449600
+        keep-alive-requests: 10000
+        keep-alive: 120
+        log-format-escape-json: "true"
+        log-format-upstream: >
+          {"time": "$time_iso8601", "remote_addr": "$remote_addr", "proxy_protocol_addr": "$proxy_protocol_addr", "x_forwarded_for": "$proxy_add_x_forwarded_for",
+          "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time,
+          "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args",
+          "request_length": $request_length, "duration": $request_time, "method": "$request_method", "http_referrer": "$http_referer",
+          "http_user_agent": "$http_user_agent"}
+        proxy-body-size: 0
+        proxy-buffer-size: 16k
+        ssl-protocols: TLSv1.3
+        use-forwarded-headers: "true"
+      metrics:
+        enabled: true
+        serviceMonitor:
+          enabled: true
+          namespaceSelector:
+            any: true
+      extraArgs:
+        default-ssl-certificate: network/darkfellanet-tls
+      terminationGracePeriodSeconds: 120
+      topologySpreadConstraints:
+        - maxSkew: 1
+          topologyKey: kubernetes.io/hostname
+          whenUnsatisfiable: DoNotSchedule
+          labelSelector:
+            matchLabels:
+              app.kubernetes.io/name: ingress-nginx
+              app.kubernetes.io/instance: nginx-internal
+              app.kubernetes.io/component: controller
+      resources:
+        requests:
+          cpu: 100m
+        limits:
+          memory: 1000Mi
+    defaultBackend:
+      enabled: false
diff --git a/kubernetes/main/apps/network/nginx/internal/kustomization.yaml b/kubernetes/main/apps/network/nginx/internal/kustomization.yaml
new file mode 100644
index 000000000..17cbc72b2
--- /dev/null
+++ b/kubernetes/main/apps/network/nginx/internal/kustomization.yaml
@@ -0,0 +1,6 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./helmrelease.yaml
diff --git a/kubernetes/main/apps/network/nginx/ks.yaml b/kubernetes/main/apps/network/nginx/ks.yaml
new file mode 100644
index 000000000..88e6deec4
--- /dev/null
+++ b/kubernetes/main/apps/network/nginx/ks.yaml
@@ -0,0 +1,62 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app nginx-external
+  namespace: flux-system
+spec:
+  targetNamespace: network
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  dependsOn:
+    - name: external-secrets-stores
+  path: ./kubernetes/main/apps/network/nginx/external
+  prune: false
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: false
+  interval: 30m
+  timeout: 5m
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app nginx-internal
+  namespace: flux-system
+spec:
+  targetNamespace: network
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  path: ./kubernetes/main/apps/network/nginx/internal
+  prune: false
+  sourceRef:
+    kind: GitRepository
+    name: home-kubernetes
+  wait: false
+  interval: 30m
+  timeout: 5m
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: &app nginx-owasp-crs-storage
+  namespace: flux-system
+spec:
+  targetNamespace: network
+  commonMetadata:
+    labels:
+      app.kubernetes.io/name: *app
+  path: ./kubernetes/main/apps/network/nginx/owasp-crs-storage
+  prune: false
+  
sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/network/nginx/owasp-crs-storage/kustomization.yaml b/kubernetes/main/apps/network/nginx/owasp-crs-storage/kustomization.yaml new file mode 100644 index 000000000..721a139bc --- /dev/null +++ b/kubernetes/main/apps/network/nginx/owasp-crs-storage/kustomization.yaml @@ -0,0 +1,16 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./pv.yaml + - ./pvc.yaml +configMapGenerator: + - name: modsecurity-config + files: + - modsecurity.conf=./resources/modsecurity.conf + - nginx.tmpl=./resources/nginx.yaml +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled diff --git a/kubernetes/main/apps/network/nginx/owasp-crs-storage/pv.yaml b/kubernetes/main/apps/network/nginx/owasp-crs-storage/pv.yaml new file mode 100644 index 000000000..5c4e48645 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/owasp-crs-storage/pv.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-pv +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteOnce + nfs: + path: /mnt/exos20/owasp-modsecurity-crs + server: nas.${PUBLIC_DOMAIN} + persistentVolumeReclaimPolicy: Retain diff --git a/kubernetes/main/apps/network/nginx/owasp-crs-storage/pvc.yaml b/kubernetes/main/apps/network/nginx/owasp-crs-storage/pvc.yaml new file mode 100644 index 000000000..0748c6538 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/owasp-crs-storage/pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: nfs-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + volumeName: nfs-pv + storageClassName: "" diff --git a/kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/modsecurity.conf b/kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/modsecurity.conf new file mode 100644 index 000000000..eb62f9ec3 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/modsecurity.conf @@ -0,0 +1,287 @@ +# -- Rule engine initialization ---------------------------------------------- + +# Enable ModSecurity, attaching it to every transaction. Use detection +# only to start with, because that minimises the chances of post-installation +# disruption. +# +SecRuleEngine DetectionOnly + + +# -- Request body handling --------------------------------------------------- + +# Allow ModSecurity to access request bodies. If you don't, ModSecurity +# won't be able to see any POST parameters, which opens a large security +# hole for attackers to exploit. +# +SecRequestBodyAccess On + + +# Enable XML request body parser. +# Initiate XML Processor in case of xml content-type +# +SecRule REQUEST_HEADERS:Content-Type "^(?:application(?:/soap\+|/)|text/)xml" \ + "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML" + +# Enable JSON request body parser. +# Initiate JSON Processor in case of JSON content-type; change accordingly +# if your application does not use 'application/json' +# +SecRule REQUEST_HEADERS:Content-Type "^application/json" \ + "id:'200001',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON" + +# Sample rule to enable JSON request body parser for more subtypes. 
+# Uncomment or adapt this rule if you want to engage the JSON
+# Processor for "+json" subtypes
+#
+SecRule REQUEST_HEADERS:Content-Type "^application/[a-z0-9.-]+[+]json" \
+ "id:'200006',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=JSON"
+
+# Maximum request body size we will accept for buffering. If you support
+# file uploads then the value given on the first line has to be as large
+# as the largest file you are willing to accept. The second value refers
+# to the size of data, with files excluded. You want to keep that value as
+# low as practical.
+#
+SecRequestBodyLimit 13107200
+SecRequestBodyNoFilesLimit 131072
+
+# What to do if the request body size is above our configured limit.
+# Keep in mind that this setting will automatically be set to ProcessPartial
+# when SecRuleEngine is set to DetectionOnly mode in order to minimize
+# disruptions when initially deploying ModSecurity.
+#
+SecRequestBodyLimitAction Reject
+
+# Maximum parsing depth allowed for JSON objects. You want to keep this
+# value as low as practical.
+#
+SecRequestBodyJsonDepthLimit 512
+
+# Maximum number of args allowed per request. You want to keep this
+# value as low as practical. The value should match that in rule 200007.
+SecArgumentsLimit 1000
+
+# If SecArgumentsLimit has been set, you probably want to reject any
+# request body that has only been partly parsed. The value used in this
+# rule should match what was used with SecArgumentsLimit.
+SecRule &ARGS "@ge 1000" \
+"id:'200007', phase:2,t:none,log,deny,status:400,msg:'Failed to fully parse request body due to large argument count',severity:2"
+
+# Verify that we've correctly processed the request body.
+# As a rule of thumb, when failing to process a request body
+# you should reject the request (when deployed in blocking mode)
+# or log a high-severity alert (when deployed in detection-only mode).
+#
+SecRule REQBODY_ERROR "!@eq 0" \
+"id:'200002', phase:2,t:none,log,deny,status:400,msg:'Failed to parse request body.',logdata:'%{reqbody_error_msg}',severity:2"
+
+# By default be strict with what we accept in the multipart/form-data
+# request body. If the rule below proves to be too strict for your
+# environment consider changing it to detection-only. You are encouraged
+# _not_ to remove it altogether.
+#
+SecRule MULTIPART_STRICT_ERROR "!@eq 0" \
+"id:'200003',phase:2,t:none,log,deny,status:400, \
+msg:'Multipart request body failed strict validation: \
+PE %{REQBODY_PROCESSOR_ERROR}, \
+BQ %{MULTIPART_BOUNDARY_QUOTED}, \
+BW %{MULTIPART_BOUNDARY_WHITESPACE}, \
+DB %{MULTIPART_DATA_BEFORE}, \
+DA %{MULTIPART_DATA_AFTER}, \
+HF %{MULTIPART_HEADER_FOLDING}, \
+LF %{MULTIPART_LF_LINE}, \
+SM %{MULTIPART_MISSING_SEMICOLON}, \
+IQ %{MULTIPART_INVALID_QUOTING}, \
+IP %{MULTIPART_INVALID_PART}, \
+IH %{MULTIPART_INVALID_HEADER_FOLDING}, \
+FL %{MULTIPART_FILE_LIMIT_EXCEEDED}'"
+
+# Did we see anything that might be a boundary?
+#
+# A short description of the ModSecurity multipart parser: the parser
+# returns 0 if every "boundary-like" line matches the boundary string
+# given in the MIME header; in any other case it returns a non-zero
+# value, e.g. 1 or 2.
+#
+# RFC 1341 describes the multipart content-type; its syntax requires
+# only three mandatory lines (besides the content itself):
+# * Content-Type: multipart/mixed; boundary=BOUNDARY_STRING
+# * --BOUNDARY_STRING
+# * --BOUNDARY_STRING--
+#
+# The first line indicates that this is multipart content, the second
+# marks the start of a part, and the third marks the end of the content.
+#
+# Any other line that starts with "--" may be another boundary id - or not.
+#
+# Since 3.0.3 there are two kinds of boundary errors: strict and permissive.
+#
+# If the multipart content contains the three mandatory lines in the
+# correct order, but one or more extra lines start with "--", the parser
+# returns 2 (non-zero).
+#
+# If one of the mandatory lines (usually the start or end) is missing,
+# or the order is wrong, the parser returns 1 (also non-zero).
+#
+# Choose whichever mode you need. The rule below implements 'strict'
+# mode: if any extra line starts with "--", ModSecurity blocks the
+# content. A 'permissive' variant would only check that the mandatory
+# lines exist in the correct order, which allows uploading PEM files
+# (eg "----BEGIN..") or other text files that contain eg. HTTP headers.
+#
+# The only difference is the operator: in strict mode the content is
+# blocked on any non-zero value, while in permissive mode it is blocked
+# only when the value is exactly 1 (0 or 2 is allowed).
+#
+
+#
+# See #1747 and #1924 for further information on the possible values for
+# MULTIPART_UNMATCHED_BOUNDARY.
+#
+SecRule MULTIPART_UNMATCHED_BOUNDARY "@eq 1" \
+ "id:'200004',phase:2,t:none,log,deny,msg:'Multipart parser detected a possible unmatched boundary.'"
+
+
+# PCRE Tuning
+# We want to avoid a potential RegEx DoS condition
+#
+SecPcreMatchLimit 1000
+SecPcreMatchLimitRecursion 1000
+
+# Some internal errors will set flags in TX and we will need to look for these.
+# All of these are prefixed with "MSC_". The following flags currently exist:
+#
+# MSC_PCRE_LIMITS_EXCEEDED: PCRE match limits were exceeded.
+#
+SecRule TX:/^MSC_/ "!@streq 0" \
+ "id:'200005',phase:2,t:none,log,deny,msg:'ModSecurity internal error flagged: %{MATCHED_VAR_NAME}'"
+
+
+# -- Response body handling --------------------------------------------------
+
+# Allow ModSecurity to access response bodies.
+# You should have this directive enabled in order to identify errors
+# and data leakage issues.
+#
+# Do keep in mind that enabling this directive does increase both
+# memory consumption and response latency.
+#
+SecResponseBodyAccess On
+
+# Which response MIME types do you want to inspect? You should adjust the
+# configuration below to catch documents but avoid static files
+# (e.g., images and archives).
+#
+SecResponseBodyMimeType text/plain text/html text/xml
+
+# Buffer response bodies of up to 512 KB in length.
+SecResponseBodyLimit 524288
+
+# What happens when we encounter a response body larger than the configured
+# limit? By default, we process what we have and let the rest through.
+# That's somewhat less secure, but does not break any legitimate pages.
+#
+SecResponseBodyLimitAction ProcessPartial
+
+
+# -- Filesystem configuration ------------------------------------------------
+
+# The location where ModSecurity stores temporary files (for example, when
+# it needs to handle a file upload that is larger than the configured limit).
+#
+# This default is chosen because all systems have /tmp available; however,
+# it is less than ideal. It is recommended that you specify a private
+# location instead.
+#
+SecTmpDir /tmp/
+
+# The location where ModSecurity will keep its persistent data. This default
+# is likewise chosen because all systems have /tmp available; however, it
+# too should be updated to a place that other users can't access.
+#
+SecDataDir /tmp/
+
+
+# -- File uploads handling configuration -------------------------------------
+
+# The location where ModSecurity stores intercepted uploaded files. This
+# location must be private to ModSecurity. You don't want other users on
+# the server to access the files, do you?
+#
+#SecUploadDir /opt/modsecurity/var/upload/
+
+# By default, only keep the files that were determined to be unusual
+# in some way (by an external inspection script). For this to work you
+# will also need at least one file inspection rule.
+#
+#SecUploadKeepFiles RelevantOnly
+
+# Uploaded files are by default created with permissions that do not allow
+# any other user to access them. You may need to relax that if you want to
+# interface ModSecurity to an external program (e.g., an anti-virus).
+#
+#SecUploadFileMode 0600
+
+
+# -- Debug log configuration -------------------------------------------------
+
+# The default debug log configuration is to duplicate the error, warning
+# and notice messages from the error log.
+#
+#SecDebugLog /opt/modsecurity/var/log/debug.log
+#SecDebugLogLevel 3
+
+
+# -- Audit log configuration -------------------------------------------------
+
+# Log the transactions that are marked by a rule, as well as those that
+# trigger a server error (determined by a 5xx or 4xx response status
+# code, excluding 404).
+#
+SecAuditEngine RelevantOnly
+SecAuditLogRelevantStatus "^(?:5|4(?!04))"
+
+# Log everything we know about a transaction.
+
+# Use a single file for logging. This is much easier to look at, but
+# assumes that you will use the audit log only occasionally.
+#
+SecAuditLog /dev/stdout
+SecAuditLogFormat JSON
+SecAuditLogType Serial
+SecAuditLogParts ABCIJDEFHZ
+
+# Specify the path for concurrent audit logging.
+#SecAuditLogStorageDir /opt/modsecurity/var/audit/
+
+
+# -- Miscellaneous -----------------------------------------------------------
+
+# Use the most commonly used application/x-www-form-urlencoded parameter
+# separator. There's probably only one application somewhere that uses
+# something else, so you are unlikely to need to change this value.
+#
+SecArgumentSeparator &
+
+# Settle on version 0 (zero) cookies, as that is what most applications
+# use. Using an incorrect cookie version may open your installation to
+# evasion attacks (against the rules that examine named cookies).
+#
+SecCookieFormat 0
+
+# Specify your Unicode Code Point.
+# This mapping is used by the t:urlDecodeUni transformation function
+# to properly map encoded data to your language. Properly setting
+# these directives helps to reduce false positives and negatives.
+#
+SecUnicodeMapFile unicode.mapping 20127
+
+# Improve the quality of ModSecurity by sharing information about your
+# current ModSecurity version and dependencies versions.
+# The following information will be shared: ModSecurity version, +# Web Server version, APR version, PCRE version, Lua version, Libxml2 +# version, Anonymous unique id for host. +# NB: As of April 2022, there is no longer any advantage to turning this +# setting On, as there is no active receiver for the information. +SecStatusEngine Off diff --git a/kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/nginx.yaml b/kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/nginx.yaml new file mode 100644 index 000000000..d1212f591 --- /dev/null +++ b/kubernetes/main/apps/network/nginx/owasp-crs-storage/resources/nginx.yaml @@ -0,0 +1,1439 @@ +{{ $all := . }} +{{ $servers := .Servers }} +{{ $cfg := .Cfg }} +{{ $IsIPV6Enabled := .IsIPV6Enabled }} +{{ $healthzURI := .HealthzURI }} +{{ $backends := .Backends }} +{{ $proxyHeaders := .ProxySetHeaders }} +{{ $addHeaders := .AddHeaders }} + +# Configuration checksum: {{ $all.Cfg.Checksum }} + +# setup custom paths that do not require root access +pid {{ .PID }}; + +{{ if $cfg.UseGeoIP2 }} +load_module /etc/nginx/modules/ngx_http_geoip2_module.so; +{{ end }} + +{{ if $cfg.EnableBrotli }} +load_module /etc/nginx/modules/ngx_http_brotli_filter_module.so; +load_module /etc/nginx/modules/ngx_http_brotli_static_module.so; +{{ end }} + +{{ if (shouldLoadAuthDigestModule $servers) }} +load_module /etc/nginx/modules/ngx_http_auth_digest_module.so; +{{ end }} + +{{ if (shouldLoadModSecurityModule $cfg $servers) }} +load_module /etc/nginx/modules/ngx_http_modsecurity_module.so; +{{ end }} + +{{ if (shouldLoadOpentelemetryModule $cfg $servers) }} +load_module /etc/nginx/modules/otel_ngx_module.so; +{{ end }} + +daemon off; + +worker_processes {{ $cfg.WorkerProcesses }}; +{{ if gt (len $cfg.WorkerCPUAffinity) 0 }} +worker_cpu_affinity {{ $cfg.WorkerCPUAffinity }}; +{{ end }} + +worker_rlimit_nofile {{ $cfg.MaxWorkerOpenFiles }}; + +{{/* http://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout */}} +{{/* avoid waiting too long during a reload */}} +worker_shutdown_timeout {{ $cfg.WorkerShutdownTimeout }} ; + +{{ if not (empty $cfg.MainSnippet) }} +{{ $cfg.MainSnippet }} +{{ end }} + +events { + multi_accept {{ if $cfg.EnableMultiAccept }}on{{ else }}off{{ end }}; + worker_connections {{ $cfg.MaxWorkerConnections }}; + use epoll; + {{ range $index , $v := $cfg.DebugConnections }} + debug_connection {{ $v }}; + {{ end }} +} + +http { + {{ if (shouldLoadOpentelemetryModule $cfg $servers) }} + opentelemetry_config {{ $cfg.OpentelemetryConfig }}; + {{ end }} + + lua_package_path "/etc/nginx/lua/?.lua;;"; + + {{ buildLuaSharedDictionaries $cfg $servers }} + + lua_shared_dict luaconfig 5m; + + init_by_lua_file /etc/nginx/lua/ngx_conf_init.lua; + + init_worker_by_lua_file /etc/nginx/lua/ngx_conf_init_worker.lua; + + {{/* Enable the real_ip module only if we use either X-Forwarded headers or Proxy Protocol. 
*/}} + {{/* we use the value of the real IP for the geo_ip module */}} + {{ if or (or $cfg.UseForwardedHeaders $cfg.UseProxyProtocol) $cfg.EnableRealIP }} + {{ if $cfg.UseProxyProtocol }} + real_ip_header proxy_protocol; + {{ else }} + real_ip_header {{ $cfg.ForwardedForHeader }}; + {{ end }} + + real_ip_recursive on; + {{ range $trusted_ip := $cfg.ProxyRealIPCIDR }} + set_real_ip_from {{ $trusted_ip }}; + {{ end }} + {{ end }} + + {{ if $all.Cfg.EnableModsecurity }} + modsecurity on; + + {{ if (not (empty $all.Cfg.ModsecuritySnippet)) }} + modsecurity_rules ' + {{ $all.Cfg.ModsecuritySnippet }} + '; + {{ else }} + modsecurity_rules_file /etc/nginx/modsecurity/modsecurity.conf; + {{ end }} + + {{ if $all.Cfg.EnableOWASPCoreRules }} + modsecurity_rules_file /etc/nginx/owasp-modsecurity-crs/nginx-modsecurity.conf; + {{ end }} + + {{ end }} + + {{ if $cfg.UseGeoIP2 }} + # https://github.com/leev/ngx_http_geoip2_module#example-usage + + {{ range $index, $file := $all.MaxmindEditionFiles }} + {{ if eq $file "GeoLite2-Country.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoLite2-Country.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_country_code source=$remote_addr country iso_code; + $geoip2_country_name source=$remote_addr country names en; + $geoip2_country_geoname_id source=$remote_addr country geoname_id; + $geoip2_continent_code source=$remote_addr continent code; + $geoip2_continent_name source=$remote_addr continent names en; + $geoip2_continent_geoname_id source=$remote_addr continent geoname_id; + } + {{ end }} + + {{ if eq $file "GeoIP2-Country.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoIP2-Country.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_country_code source=$remote_addr country iso_code; + $geoip2_country_name source=$remote_addr country names en; + $geoip2_country_geoname_id source=$remote_addr country geoname_id; + $geoip2_continent_code source=$remote_addr continent code; + $geoip2_continent_name source=$remote_addr continent names en; + $geoip2_continent_geoname_id source=$remote_addr continent geoname_id; + } + {{ end }} + + {{ if eq $file "GeoLite2-City.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoLite2-City.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_city_country_code source=$remote_addr country iso_code; + $geoip2_city_country_name source=$remote_addr country names en; + $geoip2_city_country_geoname_id source=$remote_addr country geoname_id; + $geoip2_city source=$remote_addr city names en; + $geoip2_city_geoname_id source=$remote_addr city geoname_id; + $geoip2_postal_code source=$remote_addr postal code; + $geoip2_dma_code source=$remote_addr location metro_code; + $geoip2_latitude source=$remote_addr location latitude; + $geoip2_longitude source=$remote_addr location longitude; + $geoip2_time_zone source=$remote_addr location time_zone; + $geoip2_region_code source=$remote_addr subdivisions 0 iso_code; + $geoip2_region_name source=$remote_addr subdivisions 0 names en; + $geoip2_region_geoname_id source=$remote_addr subdivisions 0 geoname_id; + $geoip2_subregion_code source=$remote_addr subdivisions 1 iso_code; + $geoip2_subregion_name source=$remote_addr subdivisions 1 names en; + $geoip2_subregion_geoname_id source=$remote_addr subdivisions 1 geoname_id; + $geoip2_city_continent_code source=$remote_addr 
continent code; + $geoip2_city_continent_name source=$remote_addr continent names en; + } + {{ end }} + + {{ if eq $file "GeoIP2-City.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoIP2-City.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_city_country_code source=$remote_addr country iso_code; + $geoip2_city_country_name source=$remote_addr country names en; + $geoip2_city_country_geoname_id source=$remote_addr country geoname_id; + $geoip2_city source=$remote_addr city names en; + $geoip2_city_geoname_id source=$remote_addr city geoname_id; + $geoip2_postal_code source=$remote_addr postal code; + $geoip2_dma_code source=$remote_addr location metro_code; + $geoip2_latitude source=$remote_addr location latitude; + $geoip2_longitude source=$remote_addr location longitude; + $geoip2_time_zone source=$remote_addr location time_zone; + $geoip2_region_code source=$remote_addr subdivisions 0 iso_code; + $geoip2_region_name source=$remote_addr subdivisions 0 names en; + $geoip2_region_geoname_id source=$remote_addr subdivisions 0 geoname_id; + $geoip2_subregion_code source=$remote_addr subdivisions 1 iso_code; + $geoip2_subregion_name source=$remote_addr subdivisions 1 names en; + $geoip2_subregion_geoname_id source=$remote_addr subdivisions 1 geoname_id; + $geoip2_city_continent_code source=$remote_addr continent code; + $geoip2_city_continent_name source=$remote_addr continent names en; + } + {{ end }} + + {{ if eq $file "GeoLite2-ASN.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoLite2-ASN.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_asn source=$remote_addr autonomous_system_number; + $geoip2_org source=$remote_addr autonomous_system_organization; + } + {{ end }} + + {{ if eq $file "GeoIP2-ASN.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoIP2-ASN.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_asn source=$remote_addr autonomous_system_number; + $geoip2_org source=$remote_addr autonomous_system_organization; + } + {{ end }} + + {{ if eq $file "GeoIP2-ISP.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoIP2-ISP.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_isp source=$remote_addr isp; + $geoip2_isp_org source=$remote_addr organization; + $geoip2_asn source=$remote_addr default=0 autonomous_system_number; + } + {{ end }} + + {{ if eq $file "GeoIP2-Connection-Type.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoIP2-Connection-Type.mmdb { + $geoip2_connection_type connection_type; + } + {{ end }} + + {{ if eq $file "GeoIP2-Anonymous-IP.mmdb" }} + geoip2 /etc/ingress-controller/geoip/GeoIP2-Anonymous-IP.mmdb { + {{ if (gt $cfg.GeoIP2AutoReloadMinutes 0) }} + auto_reload {{ $cfg.GeoIP2AutoReloadMinutes }}m; + {{ end }} + $geoip2_is_anon source=$remote_addr is_anonymous; + $geoip2_is_anonymous source=$remote_addr default=0 is_anonymous; + $geoip2_is_anonymous_vpn source=$remote_addr default=0 is_anonymous_vpn; + $geoip2_is_hosting_provider source=$remote_addr default=0 is_hosting_provider; + $geoip2_is_public_proxy source=$remote_addr default=0 is_public_proxy; + $geoip2_is_tor_exit_node source=$remote_addr default=0 is_tor_exit_node; + } + {{ end }} + + {{ end }} + + {{ end }} + + aio threads; + + {{ if $cfg.EnableAioWrite }} + aio_write on; + {{ end }} + + tcp_nopush 
on; + tcp_nodelay on; + + log_subrequest on; + + reset_timedout_connection on; + + keepalive_timeout {{ $cfg.KeepAlive }}s; + keepalive_requests {{ $cfg.KeepAliveRequests }}; + + client_body_temp_path /tmp/nginx/client-body; + fastcgi_temp_path /tmp/nginx/fastcgi-temp; + proxy_temp_path /tmp/nginx/proxy-temp; + + client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }}; + client_header_timeout {{ $cfg.ClientHeaderTimeout }}s; + large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }}; + client_body_buffer_size {{ $cfg.ClientBodyBufferSize }}; + client_body_timeout {{ $cfg.ClientBodyTimeout }}s; + + {{ if gt $cfg.GRPCBufferSizeKb 0 }} + grpc_buffer_size {{ $cfg.GRPCBufferSizeKb }}k; + {{ end }} + + {{ if and (ne $cfg.HTTP2MaxHeaderSize "") (ne $cfg.HTTP2MaxFieldSize "") }} + http2_max_field_size {{ $cfg.HTTP2MaxFieldSize }}; + http2_max_header_size {{ $cfg.HTTP2MaxHeaderSize }}; + {{ end }} + + {{ if (gt $cfg.HTTP2MaxRequests 0) }} + http2_max_requests {{ $cfg.HTTP2MaxRequests }}; + {{ end }} + + http2_max_concurrent_streams {{ $cfg.HTTP2MaxConcurrentStreams }}; + + types_hash_max_size 2048; + server_names_hash_max_size {{ $cfg.ServerNameHashMaxSize }}; + server_names_hash_bucket_size {{ $cfg.ServerNameHashBucketSize }}; + map_hash_bucket_size {{ $cfg.MapHashBucketSize }}; + + proxy_headers_hash_max_size {{ $cfg.ProxyHeadersHashMaxSize }}; + proxy_headers_hash_bucket_size {{ $cfg.ProxyHeadersHashBucketSize }}; + + variables_hash_bucket_size {{ $cfg.VariablesHashBucketSize }}; + variables_hash_max_size {{ $cfg.VariablesHashMaxSize }}; + + underscores_in_headers {{ if $cfg.EnableUnderscoresInHeaders }}on{{ else }}off{{ end }}; + ignore_invalid_headers {{ if $cfg.IgnoreInvalidHeaders }}on{{ else }}off{{ end }}; + + limit_req_status {{ $cfg.LimitReqStatusCode }}; + limit_conn_status {{ $cfg.LimitConnStatusCode }}; + + {{ buildOpentelemetry $cfg $servers }} + + include /etc/nginx/mime.types; + default_type {{ $cfg.DefaultType }}; + + {{ if $cfg.EnableBrotli }} + brotli on; + brotli_comp_level {{ $cfg.BrotliLevel }}; + brotli_min_length {{ $cfg.BrotliMinLength }}; + brotli_types {{ $cfg.BrotliTypes }}; + {{ end }} + + {{ if $cfg.UseGzip }} + gzip on; + gzip_comp_level {{ $cfg.GzipLevel }}; + {{- if $cfg.GzipDisable }} + gzip_disable "{{ $cfg.GzipDisable }}"; + {{- end }} + gzip_http_version 1.1; + gzip_min_length {{ $cfg.GzipMinLength}}; + gzip_types {{ $cfg.GzipTypes }}; + gzip_proxied any; + gzip_vary on; + {{ end }} + + # Custom headers for response + {{ range $k, $v := $addHeaders }} + more_set_headers {{ printf "%s: %s" $k $v | quote }}; + {{ end }} + + server_tokens {{ if $cfg.ShowServerTokens }}on{{ else }}off{{ end }}; + {{ if not $cfg.ShowServerTokens }} + more_clear_headers Server; + {{ end }} + + # disable warnings + uninitialized_variable_warn off; + + # Additional available variables: + # $namespace + # $ingress_name + # $service_name + # $service_port + log_format upstreaminfo {{ if $cfg.LogFormatEscapeNone }}escape=none {{ else if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ $cfg.LogFormatUpstream }}'; + + {{/* map urls that should not appear in access.log */}} + {{/* http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log */}} + map $request_uri $loggable { + {{ range $reqUri := $cfg.SkipAccessLogURLs }} + {{ $reqUri }} 0;{{ end }} + default 1; + } + + {{ if or $cfg.DisableAccessLog $cfg.DisableHTTPAccessLog }} + access_log off; + {{ else }} + {{ if $cfg.EnableSyslog }} + access_log syslog:server={{ $cfg.SyslogHost }}:{{ $cfg.SyslogPort }} upstreaminfo 
if=$loggable; + {{ else }} + access_log {{ or $cfg.HTTPAccessLogPath $cfg.AccessLogPath }} upstreaminfo {{ $cfg.AccessLogParams }} if=$loggable; + {{ end }} + {{ end }} + + {{ if $cfg.EnableSyslog }} + error_log syslog:server={{ $cfg.SyslogHost }}:{{ $cfg.SyslogPort }} {{ $cfg.ErrorLogLevel }}; + {{ else }} + error_log {{ $cfg.ErrorLogPath }} {{ $cfg.ErrorLogLevel }}; + {{ end }} + + {{ buildResolvers $cfg.Resolver $cfg.DisableIpv6DNS }} + + # See https://www.nginx.com/blog/websocket-nginx + map $http_upgrade $connection_upgrade { + default upgrade; + {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} + # See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive + '' ''; + {{ else }} + '' close; + {{ end }} + } + + # Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server. + # If no such header is provided, it can provide a random value. + map $http_x_request_id $req_id { + default $http_x_request_id; + {{ if $cfg.GenerateRequestID }} + "" $request_id; + {{ end }} + } + + {{ if and $cfg.UseForwardedHeaders $cfg.ComputeFullForwardedFor }} + # We can't use $proxy_add_x_forwarded_for because the realip module + # replaces the remote_addr too soon + map $http_x_forwarded_for $full_x_forwarded_for { + {{ if $all.Cfg.UseProxyProtocol }} + default "$http_x_forwarded_for, $proxy_protocol_addr"; + '' "$proxy_protocol_addr"; + {{ else }} + default "$http_x_forwarded_for, $realip_remote_addr"; + '' "$realip_remote_addr"; + {{ end}} + } + + {{ end }} + + # Create a variable that contains the literal $ character. + # This works because the geo module will not resolve variables. + geo $literal_dollar { + default "$"; + } + + server_name_in_redirect off; + port_in_redirect off; + + ssl_protocols {{ $cfg.SSLProtocols }}; + + ssl_early_data {{ if $cfg.SSLEarlyData }}on{{ else }}off{{ end }}; + + # turn on session caching to drastically improve performance + {{ if $cfg.SSLSessionCache }} + ssl_session_cache shared:SSL:{{ $cfg.SSLSessionCacheSize }}; + ssl_session_timeout {{ $cfg.SSLSessionTimeout }}; + {{ end }} + + # allow configuring ssl session tickets + ssl_session_tickets {{ if $cfg.SSLSessionTickets }}on{{ else }}off{{ end }}; + + {{ if not (empty $cfg.SSLSessionTicketKey ) }} + ssl_session_ticket_key /etc/ingress-controller/tickets.key; + {{ end }} + + # slightly reduce the time-to-first-byte + ssl_buffer_size {{ $cfg.SSLBufferSize }}; + + {{ if not (empty $cfg.SSLCiphers) }} + # allow configuring custom ssl ciphers + ssl_ciphers '{{ $cfg.SSLCiphers }}'; + ssl_prefer_server_ciphers on; + {{ end }} + + {{ if not (empty $cfg.SSLDHParam) }} + # allow custom DH file http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam + ssl_dhparam {{ $cfg.SSLDHParam }}; + {{ end }} + + ssl_ecdh_curve {{ $cfg.SSLECDHCurve }}; + + # PEM sha: {{ $cfg.DefaultSSLCertificate.PemSHA }} + ssl_certificate {{ $cfg.DefaultSSLCertificate.PemFileName }}; + ssl_certificate_key {{ $cfg.DefaultSSLCertificate.PemFileName }}; + + {{ if and $cfg.CustomHTTPErrors (not $cfg.DisableProxyInterceptErrors) }} + proxy_intercept_errors on; + {{ end }} + + {{ range $errCode := $cfg.CustomHTTPErrors }} + error_page {{ $errCode }} = @custom_upstream-default-backend_{{ $errCode }};{{ end }} + + proxy_ssl_session_reuse on; + + {{ if $cfg.AllowBackendServerHeader }} + proxy_pass_header Server; + {{ end }} + + {{ range $header := $cfg.HideHeaders }}proxy_hide_header {{ $header }}; + {{ end }} + + {{ if not (empty $cfg.HTTPSnippet) }} + # Custom code snippet configured in 
the configuration configmap + {{ $cfg.HTTPSnippet }} + {{ end }} + + upstream upstream_balancer { + ### Attention!!! + # + # We no longer create "upstream" section for every backend. + # Backends are handled dynamically using Lua. If you would like to debug + # and see what backends ingress-nginx has in its memory you can + # install our kubectl plugin https://kubernetes.github.io/ingress-nginx/kubectl-plugin. + # Once you have the plugin you can use "kubectl ingress-nginx backends" command to + # inspect current backends. + # + ### + + server 0.0.0.1; # placeholder + + balancer_by_lua_file /etc/nginx/lua/nginx/ngx_conf_balancer.lua; + + {{ if (gt $cfg.UpstreamKeepaliveConnections 0) }} + keepalive {{ $cfg.UpstreamKeepaliveConnections }}; + keepalive_time {{ $cfg.UpstreamKeepaliveTime }}; + keepalive_timeout {{ $cfg.UpstreamKeepaliveTimeout }}s; + keepalive_requests {{ $cfg.UpstreamKeepaliveRequests }}; + {{ end }} + } + + {{ range $rl := (filterRateLimits $servers ) }} + # Ratelimit {{ $rl.Name }} + geo $remote_addr $allowlist_{{ $rl.ID }} { + default 0; + {{ range $ip := $rl.Allowlist }} + {{ $ip }} 1;{{ end }} + } + + # Ratelimit {{ $rl.Name }} + map $allowlist_{{ $rl.ID }} $limit_{{ $rl.ID }} { + 0 {{ $cfg.LimitConnZoneVariable }}; + 1 ""; + } + {{ end }} + + {{/* build all the required rate limit zones. Each annotation requires a dedicated zone */}} + {{/* 1MB -> 16 thousand 64-byte states or about 8 thousand 128-byte states */}} + {{ range $zone := (buildRateLimitZones $servers) }} + {{ $zone }} + {{ end }} + + # Cache for internal auth checks + proxy_cache_path /tmp/nginx/nginx-cache-auth levels=1:2 keys_zone=auth_cache:10m max_size=128m inactive=30m use_temp_path=off; + + # Global filters + {{ range $ip := $cfg.BlockCIDRs }}deny {{ trimSpace $ip }}; + {{ end }} + + {{ if gt (len $cfg.BlockUserAgents) 0 }} + map $http_user_agent $block_ua { + default 0; + + {{ range $ua := $cfg.BlockUserAgents }}{{ trimSpace $ua }} 1; + {{ end }} + } + {{ end }} + + {{ if gt (len $cfg.BlockReferers) 0 }} + map $http_referer $block_ref { + default 0; + + {{ range $ref := $cfg.BlockReferers }}{{ trimSpace $ref }} 1; + {{ end }} + } + {{ end }} + + {{/* Build server redirects (from/to www) */}} + {{ range $redirect := .RedirectServers }} + ## start server {{ $redirect.From }} + server { + server_name {{ $redirect.From }}; + + {{ buildHTTPListener $all $redirect.From }} + {{ buildHTTPSListener $all $redirect.From }} + + ssl_certificate_by_lua_file /etc/nginx/lua/nginx/ngx_conf_certificate.lua; + + {{ if gt (len $cfg.BlockUserAgents) 0 }} + if ($block_ua) { + return 403; + } + {{ end }} + {{ if gt (len $cfg.BlockReferers) 0 }} + if ($block_ref) { + return 403; + } + {{ end }} + + set_by_lua_file $redirect_to /etc/nginx/lua/nginx/ngx_srv_redirect.lua {{ $redirect.To }}; + + return {{ $all.Cfg.HTTPRedirectCode }} $redirect_to; + } + ## end server {{ $redirect.From }} + {{ end }} + + {{ range $server := $servers }} + {{ range $location := $server.Locations }} + {{ $applyGlobalAuth := shouldApplyGlobalAuth $location $all.Cfg.GlobalExternalAuth.URL }} + {{ $applyAuthUpstream := shouldApplyAuthUpstream $location $all.Cfg }} + {{ if and (eq $applyAuthUpstream true) (eq $applyGlobalAuth false) }} + ## start auth upstream {{ $server.Hostname }}{{ $location.Path }} + upstream {{ buildAuthUpstreamName $location $server.Hostname }} { + {{- $externalAuth := $location.ExternalAuth }} + server {{ extractHostPort $externalAuth.URL }}; + + keepalive {{ $externalAuth.KeepaliveConnections }}; + keepalive_requests {{ 
$externalAuth.KeepaliveRequests }}; + keepalive_timeout {{ $externalAuth.KeepaliveTimeout }}s; + } + ## end auth upstream {{ $server.Hostname }}{{ $location.Path }} + {{ end }} + {{ end }} + {{ end }} + + {{ range $server := $servers }} + ## start server {{ $server.Hostname }} + server { + server_name {{ buildServerName $server.Hostname }} {{range $server.Aliases }}{{ . }} {{ end }}; + + {{ if $cfg.UseHTTP2 }} + http2 on; + {{ end }} + + {{ if gt (len $cfg.BlockUserAgents) 0 }} + if ($block_ua) { + return 403; + } + {{ end }} + {{ if gt (len $cfg.BlockReferers) 0 }} + if ($block_ref) { + return 403; + } + {{ end }} + + {{ template "SERVER" serverConfig $all $server }} + + {{ if not (empty $cfg.ServerSnippet) }} + # Custom code snippet configured in the configuration configmap + {{ $cfg.ServerSnippet }} + {{ end }} + + {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps "upstream-default-backend" $cfg.CustomHTTPErrors $all.EnableMetrics $cfg.EnableModsecurity) }} + } + ## end server {{ $server.Hostname }} + + {{ end }} + + # backend for when default-backend-service is not configured or it does not have endpoints + server { + listen {{ $all.ListenPorts.Default }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}; + {{ if $IsIPV6Enabled }}listen [::]:{{ $all.ListenPorts.Default }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }};{{ end }} + set $proxy_upstream_name "internal"; + + access_log off; + + location / { + return 404; + } + } + + # default server, used for NGINX healthcheck and access to nginx stats + server { + # Ensure that modsecurity will not run on an internal location as this is not accessible from outside + {{ if $all.Cfg.EnableModsecurity }} + modsecurity off; + {{ end }} + + listen 127.0.0.1:{{ .StatusPort }}; + set $proxy_upstream_name "internal"; + + keepalive_timeout 0; + gzip off; + + access_log off; + + {{ if $cfg.EnableOpentelemetry }} + opentelemetry off; + {{ end }} + location {{ $healthzURI }} { + return 200; + } + + location /is-dynamic-lb-initialized { + content_by_lua_file /etc/nginx/lua/nginx/ngx_conf_is_dynamic_lb_initialized.lua; + } + + location {{ .StatusPath }} { + stub_status on; + } + + location /configuration { + client_max_body_size {{ luaConfigurationRequestBodySize $cfg }}; + client_body_buffer_size {{ luaConfigurationRequestBodySize $cfg }}; + proxy_buffering off; + + content_by_lua_file /etc/nginx/lua/nginx/ngx_conf_configuration.lua; + } + + location / { + return 404; + } + } +} + +stream { + lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;;"; + + lua_shared_dict tcp_udp_configuration_data 5M; + + {{ buildResolvers $cfg.Resolver $cfg.DisableIpv6DNS }} + + init_by_lua_file /etc/nginx/lua/ngx_conf_init_stream.lua; + + init_worker_by_lua_file /etc/nginx/lua/nginx/ngx_conf_init_tcp_udp.lua; + + lua_add_variable $proxy_upstream_name; + + log_format log_stream '{{ $cfg.LogFormatStream }}'; + + {{ if or $cfg.DisableAccessLog $cfg.DisableStreamAccessLog }} + access_log off; + {{ else }} + access_log {{ or $cfg.StreamAccessLogPath $cfg.AccessLogPath }} log_stream {{ $cfg.AccessLogParams }}; + {{ end }} + + + error_log {{ $cfg.ErrorLogPath }} {{ $cfg.ErrorLogLevel }}; + {{ if $cfg.EnableRealIP }} + {{ range $trusted_ip := $cfg.ProxyRealIPCIDR }} + set_real_ip_from {{ $trusted_ip }}; + {{ end }} + {{ end }} + + upstream upstream_balancer { + server 0.0.0.1:1234; # placeholder + balancer_by_lua_file /etc/nginx/lua/nginx/ngx_conf_balancer_tcp_udp.lua; + } + + 
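+  # NOTE: as with the HTTP upstream_balancer above, the actual TCP/UDP
+  # backends are managed dynamically via Lua (ngx_conf_balancer_tcp_udp.lua);
+  # the 0.0.0.1:1234 server is only a placeholder and never receives traffic.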
server { + listen 127.0.0.1:{{ .StreamPort }}; + + access_log off; + + content_by_lua_file /etc/nginx/lua/nginx/ngx_conf_content_tcp_udp.lua; + } + + # TCP services + {{ range $tcpServer := .TCPBackends }} + server { + preread_by_lua_block { + ngx.var.proxy_upstream_name="tcp-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }}"; + } + + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.ProxyProtocol.Decode }} proxy_protocol{{ end }}; + {{ else }} + listen {{ $tcpServer.Port }}{{ if $tcpServer.Backend.ProxyProtocol.Decode }} proxy_protocol{{ end }}; + {{ end }} + {{ if $IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + listen {{ $address }}:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.ProxyProtocol.Decode }} proxy_protocol{{ end }}; + {{ else }} + listen [::]:{{ $tcpServer.Port }}{{ if $tcpServer.Backend.ProxyProtocol.Decode }} proxy_protocol{{ end }}; + {{ end }} + {{ end }} + proxy_timeout {{ $cfg.ProxyStreamTimeout }}; + proxy_next_upstream {{ if $cfg.ProxyStreamNextUpstream }}on{{ else }}off{{ end }}; + proxy_next_upstream_timeout {{ $cfg.ProxyStreamNextUpstreamTimeout }}; + proxy_next_upstream_tries {{ $cfg.ProxyStreamNextUpstreamTries }}; + + proxy_pass upstream_balancer; + {{ if $tcpServer.Backend.ProxyProtocol.Encode }} + proxy_protocol on; + {{ end }} + } + {{ end }} + + # UDP services + {{ range $udpServer := .UDPBackends }} + server { + preread_by_lua_block { + ngx.var.proxy_upstream_name="udp-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }}"; + } + + {{ range $address := $all.Cfg.BindAddressIpv4 }} + listen {{ $address }}:{{ $udpServer.Port }} udp; + {{ else }} + listen {{ $udpServer.Port }} udp; + {{ end }} + {{ if $IsIPV6Enabled }} + {{ range $address := $all.Cfg.BindAddressIpv6 }} + listen {{ $address }}:{{ $udpServer.Port }} udp; + {{ else }} + listen [::]:{{ $udpServer.Port }} udp; + {{ end }} + {{ end }} + proxy_responses {{ $cfg.ProxyStreamResponses }}; + proxy_timeout {{ $cfg.ProxyStreamTimeout }}; + proxy_next_upstream {{ if $cfg.ProxyStreamNextUpstream }}on{{ else }}off{{ end }}; + proxy_next_upstream_timeout {{ $cfg.ProxyStreamNextUpstreamTimeout }}; + proxy_next_upstream_tries {{ $cfg.ProxyStreamNextUpstreamTries }}; + proxy_pass upstream_balancer; + } + {{ end }} + + # Stream Snippets + {{ range $snippet := .StreamSnippets }} + {{ $snippet }} + {{ end }} +} + +{{/* definition of templates to avoid repetitions */}} +{{ define "CUSTOM_ERRORS" }} + {{ $enableMetrics := .EnableMetrics }} + {{ $modsecurityEnabled := .ModsecurityEnabled }} + {{ $upstreamName := .UpstreamName }} + {{ range $errCode := .ErrorCodes }} + location @custom_{{ $upstreamName }}_{{ $errCode }} { + internal; + + # Ensure that modsecurity will not run on custom error pages or they might be blocked + {{ if $modsecurityEnabled }} + modsecurity off; + {{ end }} + + proxy_intercept_errors off; + + proxy_set_header X-Code {{ $errCode }}; + proxy_set_header X-Format $http_accept; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Namespace $namespace; + proxy_set_header X-Ingress-Name $ingress_name; + proxy_set_header X-Service-Name $service_name; + proxy_set_header X-Service-Port $service_port; + proxy_set_header X-Request-ID $req_id; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header Host $best_http_host; + + set $proxy_upstream_name {{ $upstreamName | quote }}; + + rewrite (.*) / break; + + 
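+      # The error code and format are forwarded to the error backend via the
+      # X-Code and X-Format headers set above; the URI is rewritten to "/" so
+      # the default backend (here the custom-error-pages service) always
+      # receives the root path.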
proxy_pass http://upstream_balancer; + {{ if $enableMetrics }} + log_by_lua_file /etc/nginx/lua/nginx/ngx_conf_log.lua; + {{ end }} + } + {{ end }} +{{ end }} + +{{/* CORS support from https://michielkalkman.com/snippets/nginx-cors-open-configuration.html */}} +{{ define "CORS" }} + {{ $cors := .CorsConfig }} + # Cors Preflight methods needs additional options and different Return Code + {{ if $cors.CorsAllowOrigin }} + {{ buildCorsOriginRegex $cors.CorsAllowOrigin }} + {{ end }} + if ($request_method = 'OPTIONS') { + set $cors ${cors}options; + } + + if ($cors = "true") { + more_set_headers 'Access-Control-Allow-Origin: $http_origin'; + {{ if $cors.CorsAllowCredentials }} more_set_headers 'Access-Control-Allow-Credentials: {{ $cors.CorsAllowCredentials }}'; {{ end }} + more_set_headers 'Access-Control-Allow-Methods: {{ $cors.CorsAllowMethods }}'; + more_set_headers 'Access-Control-Allow-Headers: {{ $cors.CorsAllowHeaders }}'; + {{ if not (empty $cors.CorsExposeHeaders) }} more_set_headers 'Access-Control-Expose-Headers: {{ $cors.CorsExposeHeaders }}'; {{ end }} + more_set_headers 'Access-Control-Max-Age: {{ $cors.CorsMaxAge }}'; + } + + if ($cors = "trueoptions") { + more_set_headers 'Access-Control-Allow-Origin: $http_origin'; + {{ if $cors.CorsAllowCredentials }} more_set_headers 'Access-Control-Allow-Credentials: {{ $cors.CorsAllowCredentials }}'; {{ end }} + more_set_headers 'Access-Control-Allow-Methods: {{ $cors.CorsAllowMethods }}'; + more_set_headers 'Access-Control-Allow-Headers: {{ $cors.CorsAllowHeaders }}'; + {{ if not (empty $cors.CorsExposeHeaders) }} more_set_headers 'Access-Control-Expose-Headers: {{ $cors.CorsExposeHeaders }}'; {{ end }} + more_set_headers 'Access-Control-Max-Age: {{ $cors.CorsMaxAge }}'; + more_set_headers 'Content-Type: text/plain charset=UTF-8'; + more_set_headers 'Content-Length: 0'; + return 204; + } +{{ end }} + +{{/* definition of server-template to avoid repetitions with server-alias */}} +{{ define "SERVER" }} + {{ $all := .First }} + {{ $server := .Second }} + + {{ buildHTTPListener $all $server.Hostname }} + {{ buildHTTPSListener $all $server.Hostname }} + + set $proxy_upstream_name "-"; + + {{ if not ( empty $server.CertificateAuth.MatchCN ) }} + {{ if gt (len $server.CertificateAuth.MatchCN) 0 }} + if ( $ssl_client_s_dn !~ {{ $server.CertificateAuth.MatchCN }} ) { + return 403 "client certificate unauthorized"; + } + {{ end }} + {{ end }} + + {{ if eq $server.Hostname "_" }} + ssl_reject_handshake {{ if $all.Cfg.SSLRejectHandshake }}on{{ else }}off{{ end }}; + {{ end }} + + ssl_certificate_by_lua_file /etc/nginx/lua/nginx/ngx_conf_certificate.lua; + + {{ if not (empty $server.AuthTLSError) }} + # {{ $server.AuthTLSError }} + return 403; + {{ else }} + + {{ if not (empty $server.CertificateAuth.CAFileName) }} + # PEM sha: {{ $server.CertificateAuth.CASHA }} + ssl_client_certificate {{ $server.CertificateAuth.CAFileName }}; + ssl_verify_client {{ $server.CertificateAuth.VerifyClient }}; + ssl_verify_depth {{ $server.CertificateAuth.ValidationDepth }}; + + {{ if not (empty $server.CertificateAuth.CRLFileName) }} + # PEM sha: {{ $server.CertificateAuth.CRLSHA }} + ssl_crl {{ $server.CertificateAuth.CRLFileName }}; + {{ end }} + + {{ if not (empty $server.CertificateAuth.ErrorPage)}} + error_page 495 496 = {{ $server.CertificateAuth.ErrorPage }}; + {{ end }} + {{ end }} + + {{ if not (empty $server.ProxySSL.CAFileName) }} + # PEM sha: {{ $server.ProxySSL.CASHA }} + proxy_ssl_trusted_certificate {{ $server.ProxySSL.CAFileName }}; + 
proxy_ssl_ciphers {{ $server.ProxySSL.Ciphers }}; + proxy_ssl_protocols {{ $server.ProxySSL.Protocols }}; + proxy_ssl_verify {{ $server.ProxySSL.Verify }}; + proxy_ssl_verify_depth {{ $server.ProxySSL.VerifyDepth }}; + {{ if not (empty $server.ProxySSL.ProxySSLName) }} + proxy_ssl_name {{ $server.ProxySSL.ProxySSLName }}; + proxy_ssl_server_name {{ $server.ProxySSL.ProxySSLServerName }}; + {{ end }} + {{ end }} + + {{ if not (empty $server.ProxySSL.PemFileName) }} + proxy_ssl_certificate {{ $server.ProxySSL.PemFileName }}; + proxy_ssl_certificate_key {{ $server.ProxySSL.PemFileName }}; + {{ end }} + + {{ if not (empty $server.SSLCiphers) }} + ssl_ciphers {{ $server.SSLCiphers }}; + {{ end }} + + {{ if not (empty $server.SSLPreferServerCiphers) }} + ssl_prefer_server_ciphers {{ $server.SSLPreferServerCiphers }}; + {{ end }} + + {{ if not (empty $server.ServerSnippet) }} + # Custom code snippet configured for host {{ $server.Hostname }} + {{ $server.ServerSnippet }} + {{ end }} + + {{ range $errorLocation := (buildCustomErrorLocationsPerServer $server) }} + {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps $errorLocation.UpstreamName $errorLocation.Codes $all.EnableMetrics $all.Cfg.EnableModsecurity) }} + {{ end }} + + {{ buildMirrorLocations $server.Locations }} + + {{ $enforceRegex := enforceRegexModifier $server.Locations }} + {{ range $location := $server.Locations }} + {{ $path := buildLocation $location $enforceRegex }} + {{ $proxySetHeader := proxySetHeader $location }} + {{ $authPath := buildAuthLocation $location $all.Cfg.GlobalExternalAuth.URL }} + {{ $applyGlobalAuth := shouldApplyGlobalAuth $location $all.Cfg.GlobalExternalAuth.URL }} + {{ $applyAuthUpstream := shouldApplyAuthUpstream $location $all.Cfg }} + + {{ $externalAuth := $location.ExternalAuth }} + {{ if eq $applyGlobalAuth true }} + {{ $externalAuth = $all.Cfg.GlobalExternalAuth }} + {{ end }} + + {{ if not (empty $location.Rewrite.AppRoot) }} + if ($uri = /) { + return 302 $scheme://$http_host{{ $location.Rewrite.AppRoot }}; + } + {{ end }} + + {{ if $authPath }} + location = {{ $authPath }} { + internal; + + {{ if (or $all.Cfg.EnableOpentelemetry $location.Opentelemetry.Enabled) }} + opentelemetry on; + opentelemetry_propagate; + {{ end }} + + {{ if not $all.Cfg.EnableAuthAccessLog }} + access_log off; + {{ end }} + + # Ensure that modsecurity will not run on an internal location as this is not accessible from outside + {{ if $all.Cfg.EnableModsecurity }} + modsecurity off; + {{ end }} + + {{ if $externalAuth.AuthCacheKey }} + set $tmp_cache_key '{{ $server.Hostname }}{{ $authPath }}{{ $externalAuth.AuthCacheKey }}'; + set $cache_key ''; + + rewrite_by_lua_file /etc/nginx/lua/nginx/ngx_conf_rewrite_auth.lua; + + proxy_cache auth_cache; + + {{- range $dur := $externalAuth.AuthCacheDuration }} + proxy_cache_valid {{ $dur }}; + {{- end }} + + proxy_cache_key "$cache_key"; + {{ end }} + + # ngx_auth_request module overrides variables in the parent request, + # therefore we have to explicitly set this variable again so that when the parent request + # resumes it has the correct value set for this variable so that Lua can pick backend correctly + set $proxy_upstream_name {{ buildUpstreamName $location | quote }}; + + proxy_pass_request_body off; + proxy_set_header Content-Length ""; + proxy_set_header X-Forwarded-Proto ""; + proxy_set_header X-Request-ID $req_id; + + {{ if $externalAuth.Method }} + proxy_method {{ $externalAuth.Method }}; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Scheme 
$pass_access_scheme; + {{ end }} + + proxy_set_header Host {{ $externalAuth.Host }}; + proxy_set_header X-Original-URL $scheme://$http_host$request_uri; + proxy_set_header X-Original-Method $request_method; + proxy_set_header X-Sent-From "nginx-ingress-controller"; + proxy_set_header X-Real-IP $remote_addr; + {{ if and $all.Cfg.UseForwardedHeaders $all.Cfg.ComputeFullForwardedFor }} + proxy_set_header X-Forwarded-For $full_x_forwarded_for; + {{ else }} + proxy_set_header X-Forwarded-For $remote_addr; + {{ end }} + + {{ if $externalAuth.RequestRedirect }} + proxy_set_header X-Auth-Request-Redirect {{ $externalAuth.RequestRedirect }}; + {{ else }} + proxy_set_header X-Auth-Request-Redirect $request_uri; + {{ end }} + + {{ if $externalAuth.AuthCacheKey }} + proxy_buffering "on"; + {{ else }} + proxy_buffering {{ $location.Proxy.ProxyBuffering }}; + {{ end }} + proxy_buffer_size {{ $location.Proxy.BufferSize }}; + proxy_buffers {{ $location.Proxy.BuffersNumber }} {{ $location.Proxy.BufferSize }}; + proxy_request_buffering {{ $location.Proxy.RequestBuffering }}; + + proxy_ssl_server_name on; + proxy_pass_request_headers on; + {{ if isValidByteSize $location.Proxy.BodySize true }} + client_max_body_size {{ $location.Proxy.BodySize }}; + {{ end }} + {{ if isValidByteSize $location.ClientBodyBufferSize false }} + client_body_buffer_size {{ $location.ClientBodyBufferSize }}; + {{ end }} + + # Pass the extracted client certificate to the auth provider + {{ if not (empty $server.CertificateAuth.CAFileName) }} + {{ if $server.CertificateAuth.PassCertToUpstream }} + proxy_set_header ssl-client-cert $ssl_client_escaped_cert; + {{ end }} + proxy_set_header ssl-client-verify $ssl_client_verify; + proxy_set_header ssl-client-subject-dn $ssl_client_s_dn; + proxy_set_header ssl-client-issuer-dn $ssl_client_i_dn; + {{ end }} + + {{- range $line := buildAuthProxySetHeaders $externalAuth.ProxySetHeaders}} + {{ $line }} + {{- end }} + + {{ if not (empty $externalAuth.AuthSnippet) }} + {{ $externalAuth.AuthSnippet }} + {{ end }} + + {{ if and (eq $applyAuthUpstream true) (eq $applyGlobalAuth false) }} + {{ $authUpstreamName := buildAuthUpstreamName $location $server.Hostname }} + # The target is an upstream with HTTP keepalive, that is why the + # Connection header is cleared and the HTTP version is set to 1.1 as + # the Nginx documentation suggests: + # http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive + proxy_http_version 1.1; + proxy_set_header Connection ""; + set $target {{ changeHostPort $externalAuth.URL $authUpstreamName }}; + {{ else }} + proxy_http_version {{ $location.Proxy.ProxyHTTPVersion }}; + set $target {{ $externalAuth.URL }}; + {{ end }} + proxy_pass $target; + } + {{ end }} + + {{ if isLocationAllowed $location }} + {{ if $externalAuth.SigninURL }} + location {{ buildAuthSignURLLocation $location.Path $externalAuth.SigninURL }} { + internal; + + add_header Set-Cookie $auth_cookie; + + {{ if $location.CorsConfig.CorsEnabled }} + {{ template "CORS" $location }} + {{ end }} + + # Ensure that modsecurity will not run on an internal location as this is not accessible from outside + {{ if $all.Cfg.EnableModsecurity }} + modsecurity off; + {{ end }} + + return 302 {{ buildAuthSignURL $externalAuth.SigninURL $externalAuth.SigninURLRedirectParam }}; + } + {{ end }} + {{ end }} + + location {{ $path }} { + {{ $ing := (getIngressInformation $location.Ingress $server.Hostname $location.IngressPath) }} + set $namespace {{ $ing.Namespace | quote}}; + set $ingress_name {{ $ing.Rule | quote 
}}; + set $service_name {{ $ing.Service | quote }}; + set $service_port {{ $ing.ServicePort | quote }}; + set $location_path {{ $ing.Path | escapeLiteralDollar | quote }}; + + {{ buildOpentelemetryForLocation $all.Cfg.EnableOpentelemetry $all.Cfg.OpentelemetryTrustIncomingSpan $location }} + + {{ if $location.Mirror.Source }} + mirror {{ $location.Mirror.Source }}; + mirror_request_body {{ $location.Mirror.RequestBody }}; + {{ end }} + + {{ locationConfigForLua $location $all }} + + rewrite_by_lua_file /etc/nginx/lua/nginx/ngx_rewrite.lua; + + header_filter_by_lua_file /etc/nginx/lua/nginx/ngx_conf_srv_hdr_filter.lua; + + log_by_lua_file /etc/nginx/lua/nginx/ngx_conf_log_block.lua; + + {{ if not $location.Logs.Access }} + access_log off; + {{ end }} + + {{ if $location.Logs.Rewrite }} + rewrite_log on; + {{ end }} + + {{ if $location.HTTP2PushPreload }} + http2_push_preload on; + {{ end }} + + port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }}; + + set $balancer_ewma_score -1; + set $proxy_upstream_name {{ buildUpstreamName $location | quote }}; + set $proxy_host $proxy_upstream_name; + set $pass_access_scheme $scheme; + + {{ if $all.Cfg.UseProxyProtocol }} + set $pass_server_port $proxy_protocol_server_port; + {{ else }} + set $pass_server_port $server_port; + {{ end }} + + set $best_http_host $http_host; + set $pass_port $pass_server_port; + + set $proxy_alternative_upstream_name ""; + + {{ buildModSecurityForLocation $all.Cfg $location }} + + {{ if isLocationAllowed $location }} + {{ if gt (len $location.Denylist.CIDR) 0 }} + {{ range $ip := $location.Denylist.CIDR }} + deny {{ $ip }};{{ end }} + {{ end }} + {{ if gt (len $location.Allowlist.CIDR) 0 }} + {{ range $ip := $location.Allowlist.CIDR }} + allow {{ $ip }};{{ end }} + deny all; + {{ end }} + + {{ if $location.CorsConfig.CorsEnabled }} + {{ template "CORS" $location }} + {{ end }} + + {{ if not (isLocationInLocationList $location $all.Cfg.NoAuthLocations) }} + {{ if $authPath }} + # this location requires authentication + {{ if and (eq $applyAuthUpstream true) (eq $applyGlobalAuth false) }} + set $auth_cookie ''; + add_header Set-Cookie $auth_cookie; + {{- range $line := buildAuthResponseHeaders $proxySetHeader $externalAuth.ResponseHeaders true }} + {{ $line }} + {{- end }} + # `auth_request` module does not support HTTP keepalives in upstream block: + # https://trac.nginx.org/nginx/ticket/1579 + set $auth_path '{{ $authPath }}'; + set $auth_keepalive_share_vars {{ $externalAuth.KeepaliveShareVars }}; + set $auth_response_headers '{{ buildAuthUpstreamLuaHeaders $externalAuth.ResponseHeaders }}'; + access_by_lua_file /etc/nginx/lua/nginx/ngx_conf_external_auth.lua; + {{ else }} + auth_request {{ $authPath }}; + auth_request_set $auth_cookie $upstream_http_set_cookie; + {{ if $externalAuth.AlwaysSetCookie }} + add_header Set-Cookie $auth_cookie always; + {{ else }} + add_header Set-Cookie $auth_cookie; + {{ end }} + {{- range $line := buildAuthResponseHeaders $proxySetHeader $externalAuth.ResponseHeaders false }} + {{ $line }} + {{- end }} + {{ end }} + {{ end }} + + {{ if $externalAuth.SigninURL }} + set_escape_uri $escaped_request_uri $request_uri; + error_page 401 = {{ buildAuthSignURLLocation $location.Path $externalAuth.SigninURL }}; + {{ end }} + + {{ if $location.BasicDigestAuth.Secured }} + {{ if eq $location.BasicDigestAuth.Type "basic" }} + auth_basic {{ $location.BasicDigestAuth.Realm | quote }}; + auth_basic_user_file {{ $location.BasicDigestAuth.File }}; + {{ else }} + auth_digest {{ 
$location.BasicDigestAuth.Realm | quote }}; + auth_digest_user_file {{ $location.BasicDigestAuth.File }}; + {{ end }} + {{ $proxySetHeader }} Authorization ""; + {{ end }} + {{ end }} + + {{/* if the location contains a rate limit annotation, create one */}} + {{ $limits := buildRateLimit $location }} + {{ range $limit := $limits }} + {{ $limit }}{{ end }} + + {{ if isValidByteSize $location.Proxy.BodySize true }} + client_max_body_size {{ $location.Proxy.BodySize }}; + {{ end }} + {{ if isValidByteSize $location.ClientBodyBufferSize false }} + client_body_buffer_size {{ $location.ClientBodyBufferSize }}; + {{ end }} + + {{/* By default use vhost as Host to upstream, but allow overrides */}} + {{ if not (empty $location.UpstreamVhost) }} + {{ $proxySetHeader }} Host {{ $location.UpstreamVhost | quote }}; + {{ else }} + {{ $proxySetHeader }} Host $best_http_host; + {{ end }} + + # Pass the extracted client certificate to the backend + {{ if not (empty $server.CertificateAuth.CAFileName) }} + {{ if $server.CertificateAuth.PassCertToUpstream }} + {{ $proxySetHeader }} ssl-client-cert $ssl_client_escaped_cert; + {{ end }} + {{ $proxySetHeader }} ssl-client-verify $ssl_client_verify; + {{ $proxySetHeader }} ssl-client-subject-dn $ssl_client_s_dn; + {{ $proxySetHeader }} ssl-client-issuer-dn $ssl_client_i_dn; + {{ end }} + + # Allow websocket connections + {{ $proxySetHeader }} Upgrade $http_upgrade; + {{ if $location.Connection.Enabled}} + {{ $proxySetHeader }} Connection {{ $location.Connection.Header }}; + {{ else }} + {{ $proxySetHeader }} Connection $connection_upgrade; + {{ end }} + + {{ $proxySetHeader }} X-Request-ID $req_id; + {{ $proxySetHeader }} X-Real-IP $remote_addr; + {{ if and $all.Cfg.UseForwardedHeaders $all.Cfg.ComputeFullForwardedFor }} + {{ $proxySetHeader }} X-Forwarded-For $full_x_forwarded_for; + {{ else }} + {{ $proxySetHeader }} X-Forwarded-For $remote_addr; + {{ end }} + {{ $proxySetHeader }} X-Forwarded-Host $best_http_host; + {{ $proxySetHeader }} X-Forwarded-Port $pass_port; + {{ $proxySetHeader }} X-Forwarded-Proto $pass_access_scheme; + {{ $proxySetHeader }} X-Forwarded-Scheme $pass_access_scheme; + {{ if $all.Cfg.ProxyAddOriginalURIHeader }} + {{ $proxySetHeader }} X-Original-URI $request_uri; + {{ end }} + {{ $proxySetHeader }} X-Scheme $pass_access_scheme; + + # Pass the original X-Forwarded-For + {{ $proxySetHeader }} X-Original-Forwarded-For {{ buildForwardedFor $all.Cfg.ForwardedForHeader }}; + + # mitigate HTTPoxy Vulnerability + # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/ + {{ $proxySetHeader }} Proxy ""; + + # Custom headers to proxied server + {{ range $k, $v := $all.ProxySetHeaders }} + {{ $proxySetHeader }} {{ $k }} {{ $v | quote }}; + {{ end }} + + proxy_connect_timeout {{ $location.Proxy.ConnectTimeout }}s; + proxy_send_timeout {{ $location.Proxy.SendTimeout }}s; + proxy_read_timeout {{ $location.Proxy.ReadTimeout }}s; + + proxy_buffering {{ $location.Proxy.ProxyBuffering }}; + proxy_buffer_size {{ $location.Proxy.BufferSize }}; + proxy_buffers {{ $location.Proxy.BuffersNumber }} {{ $location.Proxy.BufferSize }}; + {{ if isValidByteSize $location.Proxy.ProxyMaxTempFileSize true }} + proxy_max_temp_file_size {{ $location.Proxy.ProxyMaxTempFileSize }}; + {{ end }} + proxy_request_buffering {{ $location.Proxy.RequestBuffering }}; + proxy_http_version {{ $location.Proxy.ProxyHTTPVersion }}; + + proxy_cookie_domain {{ $location.Proxy.CookieDomain }}; + proxy_cookie_path {{ $location.Proxy.CookiePath }}; + + # In case of 
errors try the next upstream server before returning an error + proxy_next_upstream {{ buildNextUpstream $location.Proxy.NextUpstream $all.Cfg.RetryNonIdempotent }}; + proxy_next_upstream_timeout {{ $location.Proxy.NextUpstreamTimeout }}; + proxy_next_upstream_tries {{ $location.Proxy.NextUpstreamTries }}; + + {{ if or (eq $location.BackendProtocol "GRPC") (eq $location.BackendProtocol "GRPCS") }} + # Grpc settings + grpc_connect_timeout {{ $location.Proxy.ConnectTimeout }}s; + grpc_send_timeout {{ $location.Proxy.SendTimeout }}s; + grpc_read_timeout {{ $location.Proxy.ReadTimeout }}s; + {{ end }} + + {{/* Add any additional configuration defined */}} + {{ $location.ConfigurationSnippet }} + + {{ if not (empty $all.Cfg.LocationSnippet) }} + # Custom code snippet configured in the configuration configmap + {{ $all.Cfg.LocationSnippet }} + {{ end }} + + {{ if $location.CustomHeaders }} + # Custom Response Headers + {{ range $k, $v := $location.CustomHeaders.Headers }} + more_set_headers {{ printf "%s: %s" $k $v | escapeLiteralDollar | quote }}; + {{ end }} + {{ end }} + + {{/* if we are sending the request to a custom default backend, we add the required headers */}} + {{ if (hasPrefix $location.Backend "custom-default-backend-") }} + proxy_set_header X-Code 503; + proxy_set_header X-Format $http_accept; + proxy_set_header X-Namespace $namespace; + proxy_set_header X-Ingress-Name $ingress_name; + proxy_set_header X-Service-Name $service_name; + proxy_set_header X-Service-Port $service_port; + proxy_set_header X-Request-ID $req_id; + {{ end }} + + {{ if $location.Satisfy }} + satisfy {{ $location.Satisfy }}; + {{ end }} + + {{/* if a location-specific error override is set, add the proxy_intercept here */}} + {{ if and $location.CustomHTTPErrors (not $location.DisableProxyInterceptErrors) }} + # Custom error pages per ingress + proxy_intercept_errors on; + {{ end }} + + {{ range $errCode := $location.CustomHTTPErrors }} + error_page {{ $errCode }} = @custom_{{ $location.DefaultBackendUpstreamName }}_{{ $errCode }};{{ end }} + + {{ if (eq $location.BackendProtocol "FCGI") }} + include /etc/nginx/fastcgi_params; + {{ end }} + {{- if $location.FastCGI.Index -}} + fastcgi_index {{ $location.FastCGI.Index | quote }}; + {{- end -}} + {{ range $k, $v := $location.FastCGI.Params }} + fastcgi_param {{ $k }} {{ $v | quote }}; + {{ end }} + + {{ if not (empty $location.Redirect.URL) }} + return {{ $location.Redirect.Code }} {{ $location.Redirect.URL }}; + {{ end }} + + {{ buildProxyPass $server.Hostname $all.Backends $location }} + {{ if (or (eq $location.Proxy.ProxyRedirectFrom "default") (eq $location.Proxy.ProxyRedirectFrom "off")) }} + proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }}; + {{ else if not (eq $location.Proxy.ProxyRedirectTo "off") }} + proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }} {{ $location.Proxy.ProxyRedirectTo }}; + {{ end }} + {{ else }} + # Location denied. 
Reason: {{ $location.Denied | quote }} + return 503; + {{ end }} + {{ if not (empty $location.ProxySSL.CAFileName) }} + # PEM sha: {{ $location.ProxySSL.CASHA }} + proxy_ssl_trusted_certificate {{ $location.ProxySSL.CAFileName }}; + proxy_ssl_ciphers {{ $location.ProxySSL.Ciphers }}; + proxy_ssl_protocols {{ $location.ProxySSL.Protocols }}; + proxy_ssl_verify {{ $location.ProxySSL.Verify }}; + proxy_ssl_verify_depth {{ $location.ProxySSL.VerifyDepth }}; + {{ end }} + + {{ if not (empty $location.ProxySSL.ProxySSLName) }} + proxy_ssl_name {{ $location.ProxySSL.ProxySSLName }}; + {{ end }} + {{ if not (empty $location.ProxySSL.ProxySSLServerName) }} + proxy_ssl_server_name {{ $location.ProxySSL.ProxySSLServerName }}; + {{ end }} + + {{ if not (empty $location.ProxySSL.PemFileName) }} + proxy_ssl_certificate {{ $location.ProxySSL.PemFileName }}; + proxy_ssl_certificate_key {{ $location.ProxySSL.PemFileName }}; + {{ end }} + } + {{ end }} + {{ end }} + + {{ if eq $server.Hostname "_" }} + # health checks in cloud providers require the use of port {{ $all.ListenPorts.HTTP }} + location {{ $all.HealthzURI }} { + + {{ if $all.Cfg.EnableOpentelemetry }} + opentelemetry off; + {{ end }} + + access_log off; + return 200; + } + + # this is required to avoid error if nginx is being monitored + # with an external software (like sysdig) + location /nginx_status { + + {{ if $all.Cfg.EnableOpentelemetry }} + opentelemetry off; + {{ end }} + + {{ range $v := $all.NginxStatusIpv4Whitelist }} + allow {{ $v }}; + {{ end }} + {{ if $all.IsIPV6Enabled -}} + {{ range $v := $all.NginxStatusIpv6Whitelist }} + allow {{ $v }}; + {{ end }} + {{ end -}} + deny all; + + access_log off; + stub_status on; + } + + {{ end }} + +{{ end }} diff --git a/kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml b/kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml new file mode 100644 index 000000000..8d1c0705b --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/externalsecret.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret oauth2-secret +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + redis-password: "{{ .REDIS_PASSWORD }}" + cookie-secret: "{{ .OAUTH2_COOKIE_SECRET }}" + client-secret: "{{ .OAUTH2_CLIENT_SECRET }}" + client-id: "{{ .OAUTH2_CLIENT_ID }}" + dataFrom: + - extract: + key: secrets/redis + - extract: + key: secrets/oauth2 diff --git a/kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml b/kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml new file mode 100644 index 000000000..610b4fdbc --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/helmrelease.yaml @@ -0,0 +1,447 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: oauth2-proxy +spec: + interval: 30m + chart: + spec: + chart: oauth2-proxy + version: 7.7.29 + sourceRef: + kind: HelmRepository + name: oauth2-proxy + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + global: {} + # To help compatibility with other charts which use global.imagePullSecrets. 
+ # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + + ## Override the deployment namespace + ## + namespaceOverride: "" + + # Force the target Kubernetes version (it uses Helm `.Capabilities` if not set). + # This is especially useful for `helm template` as capabilities are always empty + # due to the fact that it doesn't query an actual cluster + kubeVersion: + + config: + annotations: {} + existingSecret: oauth2-secret + cookieName: "_oauth2_proxy" + google: {} + # adminEmail: xxxx + # useApplicationDefaultCredentials: true + # targetPrincipal: xxxx + # serviceAccountJson: xxxx + # Alternatively, use an existing secret (see google-secret.yaml for required fields) + # Example: + # existingSecret: google-secret + # groups: [] + # Example: + # - group1@example.com + # - group2@example.com + # Default configuration, to be overridden + configFile: |- + email_domains = [ "*" ] + upstreams = [ "file:///dev/null" ] + reverse_proxy = true + scope = "openid profile email" + cookie_secure = true + provider = "keycloak-oidc" + oidc_issuer_url = "https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET" + code_challenge_method = "S256" + cookie_domains = ".${PUBLIC_DOMAIN}" + whitelist_domains = ".${PUBLIC_DOMAIN}" + backend_logout_url = "https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/logout?id_token_hint={id_token}" + skip_provider_button = true + # Custom configuration file: oauth2_proxy.cfg + # configFile: |- + # pass_basic_auth = false + # pass_access_token = true + # Use an existing config map (see configmap.yaml for required fields) + # Example: + # existingConfig: config + + alphaConfig: + enabled: false + # Add config annotations + annotations: {} + # Arbitrary configuration data to append to the server section + serverConfigData: {} + # Arbitrary configuration data to append to the metrics section + metricsConfigData: {} + # Arbitrary configuration data to append + configData: {} + # Arbitrary configuration to append + # This is treated as a Go template and rendered with the root context + configFile: "" + # Use an existing config map (see secret-alpha.yaml for required fields) + existingConfig: ~ + # Use an existing secret + existingSecret: ~ + + image: + repository: "quay.io/oauth2-proxy/oauth2-proxy" + tag: "v7.7.1-alpine@sha256:a6a1e44374d5d9b72cddf6e3d1177361b91698ae9c6da7e247139094494d3b93" + pullPolicy: "IfNotPresent" + command: [] + + # Optionally specify an array of imagePullSecrets. + # Secrets must be manually created in the namespace. + # ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + imagePullSecrets: [] + # - name: myRegistryKeySecretName + + # Set a custom containerPort if required. + # This will default to 4180 if this value is not set and the httpScheme set to http + # This will default to 4443 if this value is not set and the httpScheme set to https + # containerPort: 4180 + + extraArgs: {} + extraEnv: [] + + envFrom: [] + # Load environment variables from a ConfigMap(s) and/or Secret(s) + # that already exists (created and managed by you). + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables + # + # PS: Changes in these ConfigMaps or Secrets will not be automatically + # detected and you must manually restart the relevant Pods after changes. 
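+    # A possible alternative (an assumption, not part of the upstream chart docs): this
+    # repo also deploys stakater/reloader, so instead of manual restarts the Deployment
+    # could opt in to automatic restarts when a referenced Secret/ConfigMap changes,
+    # e.g. via the chart's deploymentAnnotations passthrough further below:
+    # deploymentAnnotations:
+    #   reloader.stakater.com/auto: "true"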
+ # + # - configMapRef: + # name: special-config + # - secretRef: + # name: special-config-secret + + # -- Custom labels to add into metadata + customLabels: {} + + # To authorize individual email addresses + # This is part of extraArgs, but since it needs special treatment it gets a separate section + authenticatedEmailsFile: + enabled: false + # Defines how the email addresses file will be projected, via a configmap or secret + persistence: configmap + # template is the name of a configmap that contains the email user list but is managed outside this chart. + # It's a simpler way to maintain a single configmap (user list) instead of changing it for each oauth2-proxy service. + # Be aware that the key in the external config map's data needs to be named "restricted_user_access" or match the + # value provided in the restrictedUserAccessKey field. + template: "" + # The configmap/secret key under which the list of allowed email addresses is stored + # Defaults to "restricted_user_access" if not filled-in, but can be overridden to allow flexibility + restrictedUserAccessKey: "" + # One email per line + # example: + # restricted_access: |- + # name1@domain + # name2@domain + # If you override the config with restricted_access, it will configure a user list within this chart, which takes care of the + # config map resource. + restricted_access: "" + annotations: {} + # helm.sh/resource-policy: keep + + service: + type: ClusterIP + # when service.type is ClusterIP ... + # clusterIP: 192.0.2.20 + # when service.type is LoadBalancer ... + # loadBalancerIP: 198.51.100.40 + # loadBalancerSourceRanges: 203.0.113.0/24 + # when service.type is NodePort ... + # nodePort: 80 + portNumber: 80 + # Protocol set on the service + appProtocol: http + annotations: {} + # foo.io/bar: "true" + # configure externalTrafficPolicy + externalTrafficPolicy: "" + # configure internalTrafficPolicy + internalTrafficPolicy: "" + + ## Create or use ServiceAccount + serviceAccount: + ## Specifies whether a ServiceAccount should be created + enabled: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + automountServiceAccountToken: true + annotations: {} + + ingress: + enabled: false + + resources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi + + extraVolumes: [] + # - name: ca-bundle-cert + # secret: + # secretName: + + extraVolumeMounts: [] + # - mountPath: /etc/ssl/certs/ + # name: ca-bundle-cert + + # Additional containers to be added to the pod. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + + priorityClassName: "" + + # hostAliases is a list of aliases to be added to /etc/hosts for network name resolution + hostAliases: [] + # - ip: "10.xxx.xxx.xxx" + # hostnames: + # - "auth.example.com" + # - ip: 127.0.0.1 + # hostnames: + # - chart-example.local + # - example.local + + # [TopologySpreadConstraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) configuration.
+ # Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + # topologySpreadConstraints: [] + + # Affinity for pod assignment + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # affinity: {} + + # Tolerations for pod assignment + # Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + tolerations: [] + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # Whether to use secrets instead of environment values for setting up OAUTH2_PROXY variables + proxyVarsAsSecrets: true + + # Configure Kubernetes liveness and readiness probes. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + # Disable both when deploying with Istio 1.0 mTLS. https://istio.io/help/faq/security/#k8s-health-checks + livenessProbe: + enabled: true + initialDelaySeconds: 0 + timeoutSeconds: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 0 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + + # Configure Kubernetes security context for container + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + enabled: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 2000 + runAsGroup: 2000 + seccompProfile: + type: RuntimeDefault + + deploymentAnnotations: {} + podAnnotations: {} + podLabels: {} + replicaCount: 1 + revisionHistoryLimit: 10 + strategy: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + podDisruptionBudget: + enabled: true + minAvailable: 1 + + ## Horizontal Pod Autoscaling + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + annotations: {} + + podSecurityContext: {} + + httpScheme: http + + initContainers: + waitForRedis: + enabled: true + image: + repository: "alpine" + tag: "latest" + pullPolicy: "IfNotPresent" + # uses the kubernetes version of the cluster + # the chart is deployed on, if not set + kubectlVersion: "" + securityContext: + enabled: true + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: + type: RuntimeDefault + timeout: 180 + resources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi + + # Additionally authenticate against a htpasswd file. Entries must be created with "htpasswd -B" for bcrypt encryption. + # Alternatively supply an existing secret which contains the required information. 
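+    # For example, a bcrypt entry can be generated with apache2-utils
+    # (hypothetical user and password shown, purely illustrative):
+    #   htpasswd -nbB someuser 'S3cretPassw0rd'
+    #   => someuser:$2y$05$...
+    # and the resulting line added to `entries` below.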
+ htpasswdFile: + enabled: false + existingSecret: "" + entries: [] + # One row for each user + # example: + # entries: + # - testuser:$2y$05$gY6dgXqjuzFhwdhsiFe7seM9q9Tile4Y3E.CBpAZJffkeiLaC21Gy + + sessionStorage: + type: redis + redis: + existingSecret: "oauth2-secret" + passwordKey: "redis-password" + clientType: "standalone" + standalone: + connectionUrl: "redis://redis-master.database.svc.cluster.local:6379" + + redis: + enabled: false + + # Enables apiVersion deprecation checks + checkDeprecation: true + + # Allows graceful shutdown + # terminationGracePeriodSeconds: 65 + # lifecycle: + # preStop: + # exec: + # command: [ "sh", "-c", "sleep 60" ] + + metrics: + # Enable Prometheus metrics endpoint + enabled: true + # Serve Prometheus metrics on this port + port: 44180 + # when service.type is NodePort ... + # nodePort: 44180 + # Protocol set on the service for the metrics port + service: + appProtocol: http + serviceMonitor: + # Enable Prometheus Operator ServiceMonitor + enabled: false + # Define the namespace where to deploy the ServiceMonitor resource + namespace: "" + # Prometheus Instance definition + prometheusInstance: default + # Prometheus scrape interval + interval: 60s + # Prometheus scrape timeout + scrapeTimeout: 30s + # Add custom labels to the ServiceMonitor resource + labels: {} + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + tlsConfig: {} + + ## bearerTokenFile: Path to bearer token file. + bearerTokenFile: "" + + ## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + annotations: {} + + ## Metric relabel configs to apply to samples before ingestion. + ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Relabel configs to apply to samples before ingestion. 
+ ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + # Extra K8s manifests to deploy + extraObjects: [] + # - apiVersion: secrets-store.csi.x-k8s.io/v1 + # kind: SecretProviderClass + # metadata: + # name: oauth2-proxy-secrets-store + # spec: + # provider: aws + # parameters: + # objects: | + # - objectName: "oauth2-proxy" + # objectType: "secretsmanager" + # jmesPath: + # - path: "client_id" + # objectAlias: "client-id" + # - path: "client_secret" + # objectAlias: "client-secret" + # - path: "cookie_secret" + # objectAlias: "cookie-secret" + # secretObjects: + # - data: + # - key: client-id + # objectName: client-id + # - key: client-secret + # objectName: client-secret + # - key: cookie-secret + # objectName: cookie-secret + # secretName: oauth2-proxy-secrets-store + # type: Opaque diff --git a/kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml b/kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml new file mode 100644 index 000000000..60c3bddec --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/ingress-external.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oauth2-proxy-external + namespace: oauth2-proxy + annotations: + external-dns.alpha.kubernetes.io/ingress-hostname-source: annotation-only +spec: + ingressClassName: external + rules: + - host: vaultwarden.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 diff --git a/kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml b/kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml new file mode 100644 index 000000000..7f7553cb4 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/ingress-internal.yaml @@ -0,0 +1,70 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: oauth2-proxy-internal + namespace: oauth2-proxy + annotations: + external-dns.alpha.kubernetes.io/ingress-hostname-source: annotation-only +spec: + ingressClassName: internal + rules: + - host: radarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: sonarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: prowlarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: qbittorrent.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: sabnzbd.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 + - host: bazarr.${PUBLIC_DOMAIN} + http: + paths: + - path: /oauth2 + pathType: ImplementationSpecific + backend: + service: + name: oauth2-proxy + port: + number: 80 diff --git a/kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml b/kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml new file mode 100644 index 000000000..baf4f9649 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/app/kustomization.yaml @@ -0,0 +1,9 @@ +--- 
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./ingress-external.yaml + - ./ingress-internal.yaml diff --git a/kubernetes/main/apps/oauth2-proxy/ks.yaml b/kubernetes/main/apps/oauth2-proxy/ks.yaml new file mode 100644 index 000000000..81107efda --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app oauth2-proxy + namespace: flux-system +spec: + targetNamespace: oauth2-proxy + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/oauth2-proxy/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/oauth2-proxy/kustomization.yaml b/kubernetes/main/apps/oauth2-proxy/kustomization.yaml new file mode 100644 index 000000000..ad2040382 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + - ./ks.yaml diff --git a/kubernetes/main/apps/oauth2-proxy/namespace.yaml b/kubernetes/main/apps/oauth2-proxy/namespace.yaml new file mode 100644 index 000000000..5b602cac9 --- /dev/null +++ b/kubernetes/main/apps/oauth2-proxy/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: oauth2-proxy diff --git a/kubernetes/main/apps/observability/gatus/app/externalsecret.yaml b/kubernetes/main/apps/observability/gatus/app/externalsecret.yaml new file mode 100644 index 000000000..38de3b20b --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/externalsecret.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret gatus-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + PUSHOVER_TOKEN: "{{ .GATUS_PUSHOVER_TOKEN }}" + PUSHOVER_USER_KEY: "{{ .PUSHOVER_USER_KEY }}" + INIT_POSTGRES_DBNAME: gatus + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .GATUS_POSTGRES_USER }}" + INIT_POSTGRES_PASS: "{{ .GATUS_POSTGRES_PASS }}" + GATUS_POSTGRES_ENCODED_PASS: "{{ .GATUS_POSTGRES_ENCODED_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + OIDC_CLIENT_ID: "{{ .OIDC_CLIENT_ID }}" + OIDC_CLIENT_SECRET: "{{ .OIDC_CLIENT_SECRET }}" + dataFrom: + - extract: + key: secrets/cloudnative-pg + - extract: + key: secrets/pushover + - extract: + key: secrets/gatus diff --git a/kubernetes/main/apps/observability/gatus/app/helmrelease.yaml b/kubernetes/main/apps/observability/gatus/app/helmrelease.yaml new file mode 100644 index 000000000..0a0d50fa5 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/helmrelease.yaml @@ -0,0 +1,138 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: 
helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: gatus +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + gatus: + annotations: + reloader.stakater.com/auto: "true" + initContainers: + init-db: + image: + repository: ghcr.io/onedr0p/postgres-init + tag: 16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f + envFrom: &envFrom + - secretRef: + name: gatus-secret + init-config: + dependsOn: init-db + image: + repository: ghcr.io/kiwigrid/k8s-sidecar + tag: 1.28.0@sha256:4166a019eeafd1f0fef4d867dc5f224f18d84ec8681dbb31f3ca258ecf07bcf2 + env: + FOLDER: /config + LABEL: gatus.io/enabled + NAMESPACE: ALL + RESOURCE: both + UNIQUE_FILENAMES: true + METHOD: WATCH + restartPolicy: Always + resources: &resources + requests: + cpu: 20m + limits: + memory: 256Mi + containers: + app: + image: + repository: ghcr.io/twin/gatus + tag: v5.13.1@sha256:24842a8adebd3dd4bd04a4038ffa27cb2fe72bb50631415e0fb2915063fc1993 + env: + TZ: Europe/Sofia + GATUS_CONFIG_PATH: /config + GATUS_DELAY_START_SECONDS: 5 + WEB_PORT: &port 80 + envFrom: *envFrom + probes: + liveness: &probes + enabled: true + custom: true + spec: + httpGet: + path: /health + port: *port + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + readiness: *probes + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + resources: *resources + defaultPodOptions: + dnsConfig: + options: + - { name: ndots, value: "1" } + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + fsGroup: 65534 + fsGroupChangePolicy: OnRootMismatch + seccompProfile: { type: RuntimeDefault } + service: + app: + controller: gatus + ports: + http: + port: *port + serviceMonitor: + app: + serviceName: gatus + endpoints: + - port: http + scheme: http + path: /metrics + interval: 1m + scrapeTimeout: 10s + ingress: + app: + annotations: + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "access-control-allow-origin https://status.${PUBLIC_DOMAIN}"; + more_set_headers "Content-Security-Policy: default-src 'none'; script-src 'self' 'sha256-vOq0p1C22jhkdRTdIHA8DSPgcwXkh5mq8FK4cumu/wU='; style-src https://status.${PUBLIC_DOMAIN}/css/app.css; img-src 'self'; connect-src https://status.${PUBLIC_DOMAIN}; manifest-src 'self'"; + className: external + hosts: + - host: status.${PUBLIC_DOMAIN} + paths: + - path: / + service: + identifier: app + port: http + serviceAccount: + create: true + name: gatus + persistence: + config: + type: emptyDir + config-file: + type: configMap + name: gatus-configmap + globalMounts: + - path: /config/config.yaml + subPath: config.yaml + readOnly: true diff --git a/kubernetes/main/apps/observability/gatus/app/kustomization.yaml b/kubernetes/main/apps/observability/gatus/app/kustomization.yaml new file mode 100644 index 000000000..30bf43b95 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./rbac.yaml + - ./helmrelease.yaml 
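+# The configMapGenerator below renders resources/config.yaml into the
+# gatus-configmap that the HelmRelease above mounts at /config/config.yaml;
+# disableNameSuffixHash keeps the generated name stable so that mount
+# reference does not drift between builds.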
+configMapGenerator: + - name: gatus-configmap + files: + - config.yaml=./resources/config.yaml +generatorOptions: + disableNameSuffixHash: true diff --git a/kubernetes/main/apps/observability/gatus/app/rbac.yaml b/kubernetes/main/apps/observability/gatus/app/rbac.yaml new file mode 100644 index 000000000..0f12c439b --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/rbac.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gatus +rules: + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: gatus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: gatus +subjects: + - kind: ServiceAccount + name: gatus + namespace: observability diff --git a/kubernetes/main/apps/observability/gatus/app/resources/config.yaml b/kubernetes/main/apps/observability/gatus/app/resources/config.yaml new file mode 100644 index 000000000..401697db9 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/app/resources/config.yaml @@ -0,0 +1,56 @@ +--- +# Note: Gatus vars should be escaped with $${VAR_NAME} to avoid interpolation by Flux +security: + oidc: + issuer-url: https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET + client-id: $${OIDC_CLIENT_ID} + client-secret: $${OIDC_CLIENT_SECRET} + redirect-url: https://status.${PUBLIC_DOMAIN}/authorization-code/callback + scopes: [openid] +web: + port: $${WEB_PORT} +storage: + type: postgres + path: postgres://$${INIT_POSTGRES_USER}:$${GATUS_POSTGRES_ENCODED_PASS}@$${INIT_POSTGRES_HOST}:5432/$${INIT_POSTGRES_DBNAME}?sslmode=disable + caching: true +metrics: true +debug: false +ui: + title: Status | Gatus + header: Status +alerting: + pushover: + title: Gatus + application-token: $${PUSHOVER_TOKEN} + user-key: $${PUSHOVER_USER_KEY} + priority: 1 + default-alert: + description: health-check failed + send-on-resolved: true + failure-threshold: 3 + success-threshold: 3 +connectivity: + checker: + target: 1.1.1.1:53 + interval: 1m +endpoints: + - name: status + group: external + url: https://status.${PUBLIC_DOMAIN} + interval: 10m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + alerts: + - type: pushover + - name: flux-webhook + group: external + url: https://flux-webhook.${PUBLIC_DOMAIN} + interval: 10m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 404" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/observability/gatus/ks.yaml b/kubernetes/main/apps/observability/gatus/ks.yaml new file mode 100644 index 000000000..878b1e591 --- /dev/null +++ b/kubernetes/main/apps/observability/gatus/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app gatus + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: cloudnative-pg-cluster + - name: external-secrets-stores + path: ./kubernetes/main/apps/observability/gatus/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/observability/grafana/app/externalsecret.yaml b/kubernetes/main/apps/observability/grafana/app/externalsecret.yaml new file mode 100644 index 
000000000..0dca52739 --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/app/externalsecret.yaml @@ -0,0 +1,40 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: grafana-admin +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: grafana-admin-secret + template: + engineVersion: v2 + data: + admin-user: "{{ .GRAFANA_ADMIN_USERNAME }}" + admin-password: "{{ .GRAFANA_ADMIN_PASSWORD }}" + dataFrom: + - extract: + key: secrets/grafana +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret auth-generic-oauth-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + client_id: "{{ .GRAFANA_OAUTH_ID }}" + client_secret: "{{ .GRAFANA_OAUTH_SECRET }}" + dataFrom: + - extract: + key: secrets/grafana diff --git a/kubernetes/main/apps/observability/grafana/app/helmrelease.yaml b/kubernetes/main/apps/observability/grafana/app/helmrelease.yaml new file mode 100644 index 000000000..afd5b4191 --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/app/helmrelease.yaml @@ -0,0 +1,252 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: grafana +spec: + interval: 30m + chart: + spec: + chart: grafana + version: 8.6.0 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + annotations: + reloader.stakater.com/auto: "true" + extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true + deploymentStrategy: + type: Recreate + env: + GF_DATE_FORMATS_USE_BROWSER_LOCALE: true + GF_EXPLORE_ENABLED: true + GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: natel-discrete-panel,panodata-map-panel + GF_SECURITY_ANGULAR_SUPPORT_ENABLED: false + GF_SERVER_ROOT_URL: https://grafana.${PUBLIC_DOMAIN} + grafana.ini: + auth: + oauth_auto_login: true + disable_login_form: true + oauth_skip_org_role_update_sync: false + skip_org_role_sync: false + auth.generic_oauth: + name: Keycloak-OAuth + enabled: true + use_refresh_token: true + email_attribute_path: email + login_attribute_path: preferred_username + name_attribute_path: name + client_id: "$__file{/etc/secrets/auth_generic_oauth/client_id}" + client_secret: "$__file{/etc/secrets/auth_generic_oauth/client_secret}" + scopes: "openid email profile roles" + auth_url: "https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/auth" + token_url: "https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/token" + api_url: "https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/userinfo" + signout_redirect_url: "https://auth.${PUBLIC_DOMAIN}/realms/DarkfellaNET/protocol/openid-connect/logout?post_logout_redirect_uri=https%3A%2F%2Fgrafana.${PUBLIC_DOMAIN}%2Flogin" + role_attribute_path: contains(resource_access."46f5398b-91b2-4706-bfc3-5f2f4ad624fc".roles[*], 'admin') && 
'Admin' || contains(resource_access."46f5398b-91b2-4706-bfc3-5f2f4ad624fc".roles[*], 'editor') && 'Editor' || 'Viewer' + role_attribute_strict: true + allow_assign_grafana_admin: true + use_pkce: true + users: + auto_assign_org: true + auto_assign_org_role: Viewer + auto_assign_org_id: 1 + security: + disable_initial_admin_creation: true + allow_embedding: false + cookie_secure: true + + analytics: + check_for_updates: false + check_for_plugin_updates: false + reporting_enabled: false + auth.anonymous: + enabled: false + auth.basic: + enabled: false + news: + news_feed_enabled: false + datasources: + datasources.yaml: + apiVersion: 1 + deleteDatasources: + - { name: Alertmanager, orgId: 1 } + - { name: Loki, orgId: 1 } + - { name: Prometheus, orgId: 1 } + datasources: + - name: Prometheus + type: prometheus + uid: prometheus + access: proxy + url: http://prometheus-operated.observability.svc.cluster.local:9090 + jsonData: + timeInterval: 1m + isDefault: true + - name: Loki + type: loki + uid: loki + access: proxy + url: http://loki-headless.observability.svc.cluster.local:3100 + jsonData: + maxLines: 250 + - name: Alertmanager + type: alertmanager + uid: alertmanager + access: proxy + url: http://alertmanager-operated.observability.svc.cluster.local:9093 + jsonData: + implementation: prometheus + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: default + orgId: 1 + folder: "" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default + dashboards: + default: + nginx: + url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json + datasource: Prometheus + nginx-request-handling-performance: + url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json + datasource: Prometheus + cert-manager: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/overview.json + datasource: Prometheus + modsecurity-logs: + url: https://raw.githubusercontent.com/Darkfella91/modsecurity-dashboard/main/dashboard.json + datasource: Loki + cloudflared: + # renovate: depName="Cloudflare Tunnels (cloudflared)" + gnetId: 17457 + revision: 6 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + external-dns: + # renovate: depName="External-dns" + gnetId: 15038 + revision: 3 + datasource: Prometheus + external-secrets: + url: https://raw.githubusercontent.com/external-secrets/external-secrets/main/docs/snippets/dashboard.json + datasource: Prometheus + flux-cluster: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/cluster.json + datasource: Prometheus + flux-control-plane: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/control-plane.json + datasource: Prometheus + flux-logs: + url: https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/main/monitoring/configs/dashboards/logs.json + datasource: Loki + kubernetes-api-server: + # renovate: depName="Kubernetes / System / API Server" + gnetId: 15761 + revision: 18 + datasource: Prometheus + kubernetes-coredns: + # renovate: depName="Kubernetes / System / CoreDNS" + gnetId: 15762 + revision: 18 + datasource: Prometheus + kubernetes-global: + # renovate: depName="Kubernetes / Views / Global" + gnetId: 15757 + revision: 42 + datasource: Prometheus + kubernetes-namespaces: + # renovate: 
depName="Kubernetes / Views / Namespaces" + gnetId: 15758 + revision: 40 + datasource: Prometheus + kubernetes-nodes: + # renovate: depName="Kubernetes / Views / Nodes" + gnetId: 15759 + revision: 32 + datasource: Prometheus + kubernetes-pods: + # renovate: depName="Kubernetes / Views / Pods" + gnetId: 15760 + revision: 32 + datasource: Prometheus + kubernetes-volumes: + # renovate: depName="K8s / Storage / Volumes / Cluster" + gnetId: 11454 + revision: 14 + datasource: Prometheus + node-exporter-full: + # renovate: depName="Node Exporter Full" + gnetId: 1860 + revision: 37 + datasource: Prometheus + prometheus: + # renovate: depName="Prometheus" + gnetId: 19105 + revision: 5 + datasource: Prometheus + unpackerr: + # renovate: depName="Unpackerr" + gnetId: 18817 + revision: 1 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + volsync: + # renovate: depName="VolSync Dashboard" + gnetId: 21356 + revision: 3 + datasource: + - { name: DS_PROMETHEUS, value: Prometheus } + - { name: VAR_REPLICATIONDESTNAME, value: .*-dst } + sidecar: + dashboards: + enabled: true + searchNamespace: ALL + label: grafana_dashboard + folderAnnotation: grafana_folder + provider: + disableDelete: true + foldersFromFilesStructure: true + datasources: + enabled: true + searchNamespace: ALL + labelValue: "" + plugins: + - grafana-clock-panel + - grafana-piechart-panel + - grafana-worldmap-panel + - natel-discrete-panel + - vonage-status-panel + serviceMonitor: + enabled: true + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + ingressClassName: internal + hosts: ["grafana.${PUBLIC_DOMAIN}"] + persistence: + enabled: false + testFramework: + enabled: false diff --git a/kubernetes/main/apps/observability/grafana/app/kustomization.yaml b/kubernetes/main/apps/observability/grafana/app/kustomization.yaml new file mode 100644 index 000000000..4eed917b9 --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/grafana/ks.yaml b/kubernetes/main/apps/observability/grafana/ks.yaml new file mode 100644 index 000000000..dd21bf3f3 --- /dev/null +++ b/kubernetes/main/apps/observability/grafana/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app grafana + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/observability/grafana/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml new file mode 100644 index 000000000..7d61f5181 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/externalsecret.yaml @@ -0,0 +1,90 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: 
external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: alertmanager +spec: + refreshInterval: 5m + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: alertmanager-secret + template: + engineVersion: v2 + data: + # Yo dawg I heard you like go templating so I put go templates in your go templates + alertmanager.yaml: | + global: + resolve_timeout: 5m + route: + group_by: ["alertname", "job"] + group_interval: 10m + group_wait: 1m + receiver: pushover + repeat_interval: 12h + routes: + - receiver: heartbeat + group_interval: 5m + group_wait: 0s + matchers: + - alertname =~ "Watchdog" + repeat_interval: 5m + - receiver: "null" + matchers: + - alertname =~ "InfoInhibitor" + - receiver: pushover + continue: true + matchers: + - severity = "critical" + inhibit_rules: + - equal: ["alertname", "namespace"] + source_matchers: + - severity = "critical" + target_matchers: + - severity = "warning" + receivers: + - name: heartbeat + webhook_configs: + - send_resolved: true + url: "{{ .ALERTMANAGER_CLUSTER_MAIN_HEARTBEAT_URL }}" + - name: "null" + - name: pushover + pushover_configs: + - html: true + message: |- + {{ "{{-" }} range .Alerts {{ "}}" }} + {{ "{{-" }} if ne .Annotations.description "" {{ "}}" }} + {{ "{{" }} .Annotations.description {{ "}}" }} + {{ "{{-" }} else if ne .Annotations.summary "" {{ "}}" }} + {{ "{{" }} .Annotations.summary {{ "}}" }} + {{ "{{-" }} else if ne .Annotations.message "" {{ "}}" }} + {{ "{{" }} .Annotations.message {{ "}}" }} + {{ "{{-" }} else {{ "}}" }} + Alert description not available + {{ "{{-" }} end {{ "}}" }} + {{ "{{-" }} if gt (len .Labels.SortedPairs) 0 {{ "}}" }} + + {{ "{{-" }} range .Labels.SortedPairs {{ "}}" }} + {{ "{{" }} .Name {{ "}}" }}: {{ "{{" }} .Value {{ "}}" }} + {{ "{{-" }} end {{ "}}" }} + + {{ "{{-" }} end {{ "}}" }} + {{ "{{-" }} end {{ "}}" }} + priority: |- + {{ "{{" }} if eq .Status "firing" {{ "}}" }}1{{ "{{" }} else {{ "}}" }}0{{ "{{" }} end {{ "}}" }} + send_resolved: true + sound: gamelan + # ttl: 1d + title: >- + [{{ "{{" }} .Status | toUpper {{ "}}" }}{{ "{{" }} if eq .Status "firing" {{ "}}" }}:{{ "{{" }} .Alerts.Firing | len {{ "}}" }}{{ "{{" }} end {{ "}}" }}] + {{ "{{" }} .CommonLabels.alertname {{ "}}" }} + token: "{{ .ALERTMANAGER_PUSHOVER_TOKEN }}" + url_title: View in Alertmanager + user_key: "{{ .PUSHOVER_USER_KEY }}" + dataFrom: + - extract: + key: secrets/pushover + - extract: + key: secrets/alertmanager diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml new file mode 100644 index 000000000..702d25ed1 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/helmrelease.yaml @@ -0,0 +1,135 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: kube-prometheus-stack +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: kube-prometheus-stack + version: 66.2.1 + sourceRef: + kind: HelmRepository + name: prometheus-community + namespace: flux-system + install: + crds: Skip + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: Skip + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: prometheus-operator-crds + namespace: observability + - name: topolvm + namespace: kube-system + values: + crds: + enabled: false + cleanPrometheusOperatorObjectNames: true + 
alertmanager: + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + ingressClassName: internal + hosts: ["alertmanager.${PUBLIC_DOMAIN}"] + pathType: Prefix + alertmanagerSpec: + useExistingSecret: true + configSecret: alertmanager-secret + storage: + volumeClaimTemplate: + spec: + storageClassName: topolvm-provisioner-thin + resources: + requests: + storage: 1Gi + kubeApiServer: + serviceMonitor: + selector: + k8s-app: kube-apiserver + kubeScheduler: + service: + selector: + k8s-app: kube-scheduler + kubeControllerManager: &kubeControllerManager + service: + selector: + k8s-app: kube-controller-manager + kubeEtcd: + <<: *kubeControllerManager # etcd runs on control plane nodes + kubeProxy: + enabled: false + prometheus: + ingress: + enabled: true + annotations: + external-dns.alpha.kubernetes.io/target: internal.${PUBLIC_DOMAIN} + ingressClassName: internal + hosts: ["prometheus.${PUBLIC_DOMAIN}"] + pathType: Prefix + prometheusSpec: + scrapeInterval: 1m # Must match interval in Grafana Helm chart + podMonitorSelector: &selector + matchLabels: null + probeSelector: *selector + ruleSelector: *selector + scrapeConfigSelector: *selector + serviceMonitorSelector: *selector + enableAdminAPI: true + walCompression: true + enableFeatures: + - auto-gomemlimit + - memory-snapshot-on-shutdown + - new-service-discovery-manager + retention: 14d + retentionSize: 70GB + resources: + requests: + cpu: 100m + limits: + memory: 1500Mi + storageSpec: + volumeClaimTemplate: + spec: + storageClassName: topolvm-provisioner-thin + resources: + requests: + storage: 75Gi + prometheus-node-exporter: + fullnameOverride: node-exporter + prometheus: + monitor: + enabled: true + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: ["__meta_kubernetes_pod_node_name"] + targetLabel: kubernetes_node + kube-state-metrics: + fullnameOverride: kube-state-metrics + metricLabelsAllowlist: + - pods=[*] + - deployments=[*] + - persistentvolumeclaims=[*] + prometheus: + monitor: + enabled: true + relabelings: + - action: replace + regex: (.*) + replacement: $1 + sourceLabels: ["__meta_kubernetes_pod_node_name"] + targetLabel: kubernetes_node + grafana: + enabled: false + forceDeployDashboards: true diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml new file mode 100644 index 000000000..9cffb524f --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/kustomization.yaml @@ -0,0 +1,8 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml new file mode 100644 index 000000000..4d880fa20 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/app/prometheusrule.yaml @@ -0,0 +1,25 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: miscellaneous-rules +spec: + groups: + - name: dockerhub + rules: + - alert: BootstrapRateLimitRisk + annotations: + summary: Kubernetes cluster at risk 
of being rate limited by dockerhub on bootstrap + expr: count(time() - container_last_seen{image=~"(docker.io).*",container!=""} < 30) > 100 + for: 15m + labels: + severity: critical + - name: oom + rules: + - alert: OOMKilled + annotations: + summary: Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes. + expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1 + labels: + severity: critical diff --git a/kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml b/kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml new file mode 100644 index 000000000..60ee96561 --- /dev/null +++ b/kubernetes/main/apps/observability/kube-prometheus-stack/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app kube-prometheus-stack + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: external-secrets-stores + path: ./kubernetes/main/apps/observability/kube-prometheus-stack/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 15m diff --git a/kubernetes/main/apps/observability/kustomization.yaml b/kubernetes/main/apps/observability/kustomization.yaml new file mode 100644 index 000000000..e75753359 --- /dev/null +++ b/kubernetes/main/apps/observability/kustomization.yaml @@ -0,0 +1,14 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./grafana/ks.yaml + - ./kube-prometheus-stack/ks.yaml + - ./loki/ks.yaml + - ./prometheus-operator-crds/ks.yaml + - ./promtail/ks.yaml + - ./gatus/ks.yaml diff --git a/kubernetes/main/apps/observability/loki/app/helmrelease.yaml b/kubernetes/main/apps/observability/loki/app/helmrelease.yaml new file mode 100644 index 000000000..1cbd5e37c --- /dev/null +++ b/kubernetes/main/apps/observability/loki/app/helmrelease.yaml @@ -0,0 +1,85 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: loki +spec: + interval: 30m + timeout: 15m + chart: + spec: + chart: loki + version: 6.19.0 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + crds: Skip + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: Skip + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: topolvm + namespace: kube-system + values: + deploymentMode: SingleBinary + loki: + auth_enabled: false + analytics: + reporting_enabled: false + server: + log_level: info + commonConfig: + replication_factor: 1 + compactor: + working_directory: /var/loki/compactor/retention + delete_request_store: filesystem + retention_enabled: true + ingester: + chunk_encoding: snappy + storage: + type: filesystem + schemaConfig: + configs: + - from: "2024-04-01" # quote + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: 
loki_index_ + period: 24h + limits_config: + retention_period: 14d + singleBinary: + replicas: 1 + persistence: + enabled: true + size: 50Gi + gateway: + replicas: 0 + backend: + replicas: 0 + read: + replicas: 0 + write: + replicas: 0 + chunksCache: + enabled: false + resultsCache: + enabled: false + lokiCanary: + enabled: false + test: + enabled: false + sidecar: + image: + repository: ghcr.io/kiwigrid/k8s-sidecar + rules: + searchNamespace: ALL diff --git a/kubernetes/main/apps/observability/loki/app/kustomization.yaml b/kubernetes/main/apps/observability/loki/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/observability/loki/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/loki/ks.yaml b/kubernetes/main/apps/observability/loki/ks.yaml new file mode 100644 index 000000000..2c1907079 --- /dev/null +++ b/kubernetes/main/apps/observability/loki/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app loki + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/observability/loki/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 15m diff --git a/kubernetes/main/apps/observability/namespace.yaml b/kubernetes/main/apps/observability/namespace.yaml new file mode 100644 index 000000000..cef355262 --- /dev/null +++ b/kubernetes/main/apps/observability/namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: observability + labels: + kustomize.toolkit.fluxcd.io/prune: disabled +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: observability +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: observability +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml b/kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml new file mode 100644 index 000000000..7bddd32ad --- /dev/null +++ b/kubernetes/main/apps/observability/prometheus-operator-crds/app/helmrelease.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: prometheus-operator-crds +spec: + interval: 30m + 
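+  # CRDs are shipped by this dedicated chart so releases such as
+  # kube-prometheus-stack and loki can set `crds: Skip` and never
+  # fight over CRD ownership.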
chart: + spec: + chart: prometheus-operator-crds + version: 16.0.0 + sourceRef: + kind: HelmRepository + name: prometheus-community + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 diff --git a/kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml b/kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/observability/prometheus-operator-crds/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml b/kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml new file mode 100644 index 000000000..76d2ab437 --- /dev/null +++ b/kubernetes/main/apps/observability/prometheus-operator-crds/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app prometheus-operator-crds + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/observability/prometheus-operator-crds/app + prune: false # never should be deleted + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/observability/promtail/app/helmrelease.yaml b/kubernetes/main/apps/observability/promtail/app/helmrelease.yaml new file mode 100644 index 000000000..6489e5090 --- /dev/null +++ b/kubernetes/main/apps/observability/promtail/app/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: promtail +spec: + interval: 30m + chart: + spec: + chart: promtail + version: 6.16.6 + sourceRef: + kind: HelmRepository + name: grafana + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + values: + fullnameOverride: promtail + config: + clients: + - url: http://loki-headless.observability.svc.cluster.local:3100/loki/api/v1/push + serviceMonitor: + enabled: true diff --git a/kubernetes/main/apps/observability/promtail/app/kustomization.yaml b/kubernetes/main/apps/observability/promtail/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/observability/promtail/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/observability/promtail/ks.yaml b/kubernetes/main/apps/observability/promtail/ks.yaml new file mode 100644 index 000000000..01932a708 --- /dev/null +++ b/kubernetes/main/apps/observability/promtail/ks.yaml @@ -0,0 +1,20 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app 
promtail + namespace: flux-system +spec: + targetNamespace: observability + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/observability/promtail/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/system-upgrade/kustomization.yaml b/kubernetes/main/apps/system-upgrade/kustomization.yaml new file mode 100644 index 000000000..affe04660 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./system-upgrade-controller/ks.yaml diff --git a/kubernetes/main/apps/system-upgrade/namespace.yaml b/kubernetes/main/apps/system-upgrade/namespace.yaml new file mode 100644 index 000000000..cb902258a --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system-upgrade + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: system-upgrade +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: system-upgrade +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml new file mode 100644 index 000000000..a9ec7da63 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/helmrelease.yaml @@ -0,0 +1,101 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/main/charts/other/app-template/schemas/helmrelease-helm-v2.schema.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app system-upgrade-controller +spec: + interval: 30m + chart: + spec: + chart: app-template + version: 3.5.1 + sourceRef: + kind: HelmRepository + name: bjw-s + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + values: + controllers: + system-upgrade-controller: + strategy: RollingUpdate + containers: + app: + image: + repository: docker.io/rancher/system-upgrade-controller + tag: v0.14.2@sha256:3cdbfdd90f814702cefb832fc4bdb09ea93865a4d06c6bafd019d1dc6a9f34c9 + env: + SYSTEM_UPGRADE_CONTROLLER_DEBUG: false + SYSTEM_UPGRADE_CONTROLLER_THREADS: 2 + SYSTEM_UPGRADE_JOB_ACTIVE_DEADLINE_SECONDS: 900 + SYSTEM_UPGRADE_JOB_BACKOFF_LIMIT: 99 + SYSTEM_UPGRADE_JOB_IMAGE_PULL_POLICY: 
IfNotPresent + SYSTEM_UPGRADE_JOB_KUBECTL_IMAGE: registry.k8s.io/kubectl:v1.31.2@sha256:d31de5468fb5c0943358671e3dcf8e4d8281108027efd1f211262d09aedd5519 + SYSTEM_UPGRADE_JOB_POD_REPLACEMENT_POLICY: Failed + SYSTEM_UPGRADE_JOB_PRIVILEGED: true + SYSTEM_UPGRADE_JOB_TTL_SECONDS_AFTER_FINISH: 900 + SYSTEM_UPGRADE_PLAN_POLLING_INTERVAL: 15m + SYSTEM_UPGRADE_CONTROLLER_NAME: *app + SYSTEM_UPGRADE_CONTROLLER_NAMESPACE: + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: { drop: ["ALL"] } + seccompProfile: + type: RuntimeDefault + defaultPodOptions: + securityContext: + runAsNonRoot: true + runAsUser: 65534 + runAsGroup: 65534 + seccompProfile: { type: RuntimeDefault } + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccount: + create: true + name: system-upgrade + persistence: + tmp: + type: emptyDir + etc-ssl: + type: hostPath + hostPath: /etc/ssl + hostPathType: DirectoryOrCreate + globalMounts: + - readOnly: true + etc-pki: + type: hostPath + hostPath: /etc/pki + hostPathType: DirectoryOrCreate + globalMounts: + - readOnly: true + etc-ca-certificates: + type: hostPath + hostPath: /etc/ca-certificates + hostPathType: DirectoryOrCreate + globalMounts: + - readOnly: true diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml new file mode 100644 index 000000000..10a8a8289 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml + - rbac.yaml diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml new file mode 100644 index 000000000..e9f4d789c --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/app/rbac.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system-upgrade +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: system-upgrade + namespace: system-upgrade +--- +apiVersion: talos.dev/v1alpha1 +kind: ServiceAccount +metadata: + name: talos +spec: + roles: + - os:admin diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml new file mode 100644 index 000000000..d13b87500 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/ks.yaml @@ -0,0 +1,49 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-controller + namespace: flux-system +spec: + targetNamespace: system-upgrade + 
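+  # wait: true below ensures the controller is running before the
+  # -plans Kustomization (which dependsOn this one) applies any Plans.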
commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/system-upgrade/system-upgrade-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app system-upgrade-controller-plans + namespace: flux-system +spec: + targetNamespace: system-upgrade + commonMetadata: + labels: + app.kubernetes.io/name: *app + dependsOn: + - name: system-upgrade-controller + path: ./kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + TALOS_SCHEMATIC_ID: 1ebbba98f713bfc919cbb0c672240180d64d24b3f2cc478a2ffe8942716e57c1 + # renovate: datasource=docker depName=ghcr.io/siderolabs/installer + TALOS_VERSION: v1.8.3 + # renovate: datasource=docker depName=ghcr.io/siderolabs/kubelet + KUBERNETES_VERSION: v1.31.2 diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml new file mode 100644 index 000000000..ae8704eac --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kubernetes.yaml @@ -0,0 +1,45 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/upgrade.cattle.io/plan_v1.json +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: kubernetes +spec: + version: ${KUBERNETES_VERSION} + serviceAccountName: system-upgrade + secrets: + - name: talos + path: /var/run/secrets/talos.dev + ignoreUpdates: true + concurrency: 1 + exclusive: true + nodeSelector: + matchExpressions: + - key: feature.node.kubernetes.io/system-os_release.ID + operator: In + values: ["talos"] + - key: node-role.kubernetes.io/control-plane + operator: Exists + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + prepare: &prepare + image: ghcr.io/siderolabs/talosctl:${TALOS_VERSION} + envs: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + args: + - --nodes=$(NODE_IP) + - health + - --server=true + upgrade: + <<: *prepare + args: + - --nodes=$(NODE_IP) + - upgrade-k8s + - --to=$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION) diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml new file mode 100644 index 000000000..061d8ad0d --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./kubernetes.yaml + - ./talos.yaml diff --git a/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml new file mode 100644 index 000000000..8cbae8f10 --- /dev/null +++ b/kubernetes/main/apps/system-upgrade/system-upgrade-controller/plans/talos.yaml @@ -0,0 +1,48 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/upgrade.cattle.io/plan_v1.json +apiVersion: upgrade.cattle.io/v1 +kind: Plan +metadata: + name: talos +spec: + version: ${TALOS_VERSION} + serviceAccountName: system-upgrade + secrets: + - name: talos + path: /var/run/secrets/talos.dev + ignoreUpdates: true + concurrency: 1 + exclusive: true + nodeSelector: + matchExpressions: + - key: feature.node.kubernetes.io/system-os_release.ID + operator: In + values: ["talos"] + - key: feature.node.kubernetes.io/system-os_release.VERSION_ID + operator: NotIn + values: ["${TALOS_VERSION}"] + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + prepare: &prepare + image: ghcr.io/siderolabs/talosctl:${TALOS_VERSION} + envs: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + args: + - --nodes=$(NODE_IP) + - health + - --server=true + upgrade: + <<: *prepare + args: + - --nodes=$(NODE_IP) + - upgrade + - --image=factory.talos.dev/installer-secureboot/${TALOS_SCHEMATIC_ID}:$(SYSTEM_UPGRADE_PLAN_LATEST_VERSION) + - --preserve=true + - --wait=false diff --git a/kubernetes/main/apps/vault/kustomization.yaml b/kubernetes/main/apps/vault/kustomization.yaml new file mode 100644 index 000000000..3d21bd1cd --- /dev/null +++ b/kubernetes/main/apps/vault/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./vault/ks.yaml diff --git a/kubernetes/main/apps/vault/namespace.yaml b/kubernetes/main/apps/vault/namespace.yaml new file mode 100644 index 000000000..33be07ae2 --- /dev/null +++ b/kubernetes/main/apps/vault/namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: vault diff --git a/kubernetes/main/apps/vault/vault/app/helmrelease.yaml b/kubernetes/main/apps/vault/vault/app/helmrelease.yaml new file mode 100644 index 000000000..b0916dbc9 --- /dev/null +++ b/kubernetes/main/apps/vault/vault/app/helmrelease.yaml @@ -0,0 +1,452 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app vault +spec: + interval: 30m + chart: + spec: + chart: vault + version: 0.29.0 + sourceRef: + kind: HelmRepository + name: vault + namespace: flux-system + maxHistory: 3 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + + values: + + global: + enabled: true + tlsDisable: false + serverTelemetry: + prometheusOperator: false + + injector: + enabled: false + + server: + enabled: true + image: + repository: "hashicorp/vault" + tag: "1.18.1" + pullPolicy: IfNotPresent + + updateStrategyType: "OnDelete" + + logLevel: "info" + logFormat: "json" + + resources: {} + + authDelegator: + enabled: true + + extraInitContainers: null + + extraContainers: null + + shareProcessNamespace: false + + extraArgs: "" + + # extraPorts is a list of extra ports. Specified as a YAML list. + # This is useful if you need to add additional ports to the statefulset in dynamic way. 
+ extraPorts: null + # - containerPort: 8300 + # name: http-monitoring + + # Used to define custom readinessProbe settings + readinessProbe: + enabled: true + port: 8200 + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + livenessProbe: + enabled: true + path: "/v1/sys/health?standbyok=true" + port: 8200 + failureThreshold: 2 + initialDelaySeconds: 60 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + + terminationGracePeriodSeconds: 10 + + preStopSleepSeconds: 5 + + # Used to define commands to run after the pod is ready. + # This can be used to automate processes such as initialization + # or boostrapping auth methods. + postStart: [] + # - /bin/sh + # - -c + # - /vault/userconfig/myscript/run.sh + + extraEnvironmentVars: + VAULT_TLSCERT: /vault/tls/tls.crt + VAULT_TLSKEY: /vault/tls/tls.key + + extraSecretEnvironmentVars: [] + + volumes: + - name: vault-tls + secret: + defaultMode: 420 + secretName: darkfellanet-tls + + volumeMounts: + - mountPath: /vault/tls + name: vault-tls + readOnly: true + + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: server + topologyKey: kubernetes.io/hostname + + # Topology settings for server pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. + topologySpreadConstraints: [] + + # Toleration Settings for server pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Enables network policy for server pods + networkPolicy: + enabled: false + egress: [] + # egress: + # - to: + # - ipBlock: + # cidr: 10.0.0.0/24 + # ports: + # - protocol: TCP + # port: 443 + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8200 + protocol: TCP + - port: 8201 + protocol: TCP + + # Priority class for server pods + priorityClassName: "" + + # Extra labels to attach to the server pods + # This should be a YAML map of the labels to apply to the server pods + extraLabels: {} + + # Extra annotations to attach to the server pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the server pods + annotations: {} + + # Add an annotation to the server configmap and the statefulset pods, + # vaultproject.io/config-checksum, that is a hash of the Vault configuration. + # This can be used together with an OnDelete deployment strategy to help + # identify which pods still need to be deleted during a deployment to pick up + # any configuration changes. + includeConfigAnnotation: false + + # Enables a headless service to be used by the Vault Statefulset + service: + enabled: false + # Enable or disable the vault-active service, which selects Vault pods that + # have labeled themselves as the cluster leader with `vault-active: "true"`. + active: + enabled: false + # Extra annotations for the service definition. 
This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the active service. + annotations: {} + # Enable or disable the vault-standby service, which selects Vault pods that + # have labeled themselves as a cluster follower with `vault-active: "false"`. + standby: + enabled: false + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the standby service. + annotations: {} + # If enabled, the service selectors will include `app.kubernetes.io/instance: {{ .Release.Name }}` + # When disabled, services may select Vault pods not deployed from the chart. + # Does not affect the headless vault-internal service with `ClusterIP: None` + instanceSelector: + enabled: true + # clusterIP controls whether a Cluster IP address is attached to the + # Vault service within Kubernetes. By default, the Vault service will + # be given a Cluster IP address, set to None to disable. When disabled + # Kubernetes will create a "headless" service. Headless services can be + # used to communicate with pods directly through DNS instead of a round-robin + # load balancer. + # clusterIP: None + + # Configures the service type for the main Vault service. Can be ClusterIP + # or NodePort. + #type: ClusterIP + + # The IP family and IP families options are to set the behaviour in a dual-stack environment. + # Omitting these values will let the service fall back to whatever the CNI dictates the defaults + # should be. + # These are only supported for kubernetes versions >=1.23.0 + # + # Configures the service's supported IP family policy, can be either: + # SingleStack: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. + # PreferDualStack: Allocates IPv4 and IPv6 cluster IPs for the Service. + # RequireDualStack: Allocates Service .spec.ClusterIPs from both IPv4 and IPv6 address ranges. + ipFamilyPolicy: "" + + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. + # Can be IPv4 and/or IPv6. + ipFamilies: [] + + # Do not wait for pods to be ready before including them in the services' + # targets. Does not apply to the headless service, which is used for + # cluster-internal communication. + publishNotReadyAddresses: true + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #nodePort: 30000 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #activeNodePort: 30001 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #standbyNodePort: 30002 + + # Port on which Vault server is listening + port: 8200 + # Target port to which the service should be mapped to + targetPort: 8200 + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the service. 
+ annotations: {} + + dataStorage: + enabled: true + size: 20Gi + mountPath: "/vault/data" + accessMode: ReadWriteOnce + + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain + + auditStorage: + enabled: true + size: 10Gi + mountPath: "/vault/audit" + accessMode: ReadWriteOnce + + standalone: + enabled: false + + ha: + enabled: true + replicas: 1 + raft: + enabled: true + setNodeId: false + config: | + + seal "awskms" { + region = "us-east-1" + access_key = "${AWS_ACCESS_KEY}" + secret_key = "${AWS_SECRET_KEY}" + kms_key_id = "${AWS_KEY_ID}" + } + + ui = true + api_addr = "https://vault.${PUBLIC_DOMAIN}:8200" + cluster_addr = "https://vault.${PUBLIC_DOMAIN}:8201" + + listener "tcp" { + tls_disable = 0 + address = "0.0.0.0:8200" + cluster_address = "0.0.0.0:8201" + tls_cert_file = "/vault/tls/tls.crt" + tls_key_file = "/vault/tls/tls.key" + tls_min_version = "tls13" + + # Enable unauthenticated metrics access (necessary for Prometheus Operator) + #telemetry { + # unauthenticated_metrics_access = "true" + #} + } + + storage "raft" { + path = "/vault/data" + } + + service_registration "kubernetes" {} + + disruptionBudget: + enabled: true + maxUnavailable: null + + serviceAccount: + create: true + name: "" + createSecret: false + serviceDiscovery: + enabled: true + + statefulSet: + annotations: {} + securityContext: + pod: {} + container: {} + + hostNetwork: false + + ui: + enabled: true + publishNotReadyAddresses: true + activeVaultPodOnly: false + serviceType: "LoadBalancer" + serviceNodePort: null + externalPort: 8200 + targetPort: 8200 + externalTrafficPolicy: Cluster + annotations: + external-dns.alpha.kubernetes.io/hostname: vault.${PUBLIC_DOMAIN} + lbipam.cilium.io/ips: 192.168.91.96 + + serverTelemetry: + # Enable support for the Prometheus Operator. If authorization is not set for authenticating + # to Vault's metrics endpoint, the following Vault server `telemetry{}` config must be included + # in the `listener "tcp"{}` stanza + # telemetry { + # unauthenticated_metrics_access = "true" + # } + # + # See the `standalone.config` for a more complete example of this. + # + # In addition, a top level `telemetry{}` stanza must also be included in the Vault configuration: + # + # example: + # telemetry { + # prometheus_retention_time = "30s" + # disable_hostname = true + # } + # + # Configuration for monitoring the Vault server. + serviceMonitor: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + # + # Instructions on how to install the Helm chart can be found here: + # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack + # More information can be found here: + # https://github.com/prometheus-operator/prometheus-operator + # https://github.com/prometheus-operator/kube-prometheus + + # Enable deployment of the Vault Server ServiceMonitor CustomResource. + enabled: false + + # Selector labels to add to the ServiceMonitor. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Interval at which Prometheus scrapes metrics + interval: 30s + + # Timeout for Prometheus scrapes + scrapeTimeout: 10s + + # tlsConfig used for scraping the Vault metrics API. 
+ # See API reference: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.TLSConfig + # example: + # tlsConfig: + # ca: + # secret: + # name: vault-metrics-client + # key: ca.crt + tlsConfig: {} + + # authorization used for scraping the Vault metrics API. + # See API reference: https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.SafeAuthorization + # example: + # authorization: + # credentials: + # name: vault-metrics-client + # key: token + authorization: {} + + prometheusRules: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + + # Deploy the PrometheusRule custom resource for AlertManager based alerts. + # Requires that AlertManager is properly deployed. + enabled: false + + # Selector labels to add to the PrometheusRules. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Some example rules. + rules: [] + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 500ms on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500 + # for: 5m + # labels: + # severity: warning + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 1s on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000 + # for: 5m + # labels: + # severity: critical \ No newline at end of file diff --git a/kubernetes/main/apps/vault/vault/app/kustomization.yaml b/kubernetes/main/apps/vault/vault/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/vault/vault/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/vault/vault/ks.yaml b/kubernetes/main/apps/vault/vault/ks.yaml new file mode 100644 index 000000000..1a9df2cc4 --- /dev/null +++ b/kubernetes/main/apps/vault/vault/ks.yaml @@ -0,0 +1,23 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app vault + namespace: flux-system +spec: + targetNamespace: vault + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/vault/vault/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app diff --git a/kubernetes/main/apps/vaultwarden/kustomization.yaml b/kubernetes/main/apps/vaultwarden/kustomization.yaml new file mode 100644 index 000000000..0df31c69f --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/kustomization.yaml @@ -0,0 +1,9 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./vaultwarden/ks.yaml diff --git a/kubernetes/main/apps/vaultwarden/namespace.yaml b/kubernetes/main/apps/vaultwarden/namespace.yaml new file mode 100644 index 000000000..9201211cd --- /dev/null +++ 
b/kubernetes/main/apps/vaultwarden/namespace.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: vaultwarden +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: vaultwarden +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: vaultwarden +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/vaultwarden/vaultwarden/app/externalsecret.yaml b/kubernetes/main/apps/vaultwarden/vaultwarden/app/externalsecret.yaml new file mode 100644 index 000000000..3d5c750ba --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/vaultwarden/app/externalsecret.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/external-secrets.io/externalsecret_v1beta1.json +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: &secret vaultwarden-secret +spec: + secretStoreRef: + kind: ClusterSecretStore + name: vault-backend + target: + name: *secret + template: + engineVersion: v2 + data: + VAULTWARDEN_ADMIN_TOKEN: "{{ .VAULTWARDEN_ADMIN_TOKEN }}" + VAULTWARDEN_DATABASE_URI: "{{ .VAULTWARDEN_DATABASE_URI }}" + SMTP_USERNAME: "{{ .SMTP_USERNAME }}" + SMTP_VAULTWARDEN_PASS: "{{ .SMTP_VAULTWARDEN_PASS }}" + SMTP__PORT: "587" + SMTP_SENDER: "{{ .SMTP_SENDER }}" + INIT_POSTGRES_DBNAME: vaultwarden + INIT_POSTGRES_HOST: postgres17-rw.database.svc.cluster.local + INIT_POSTGRES_USER: "{{ .VAULTWARDEN_POSTGRESS_USER }}" + INIT_POSTGRES_PASS: "{{ .VAULTWARDEN_POSTGRES_PASS }}" + INIT_POSTGRES_SUPER_PASS: "{{ .POSTGRES_SUPER_PASS }}" + dataFrom: + - extract: + key: secrets/vaultwarden + - extract: + key: secrets/Brevo + - extract: + key: secrets/cloudnative-pg diff --git a/kubernetes/main/apps/vaultwarden/vaultwarden/app/helmrelease.yaml b/kubernetes/main/apps/vaultwarden/vaultwarden/app/helmrelease.yaml new file mode 100644 index 000000000..0543eea9d --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/vaultwarden/app/helmrelease.yaml @@ -0,0 +1,153 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: &app vaultwarden +spec: + interval: 30m + chart: + spec: + chart: vaultwarden + version: 0.29.4 + sourceRef: + kind: HelmRepository + name: vaultwarden + namespace: flux-system + maxHistory: 3 + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + retries: 3 + uninstall: + keepHistory: false + valuesFrom: + - targetPath: smtp.from + kind: Secret + name: vaultwarden-secret + valuesKey: SMTP_SENDER + values: + image: + registry: docker.io + repository: vaultwarden/server + tag: 1.32.4-alpine@sha256:3eda23498a8199e58e81b01f68a3f028830224ed6f5651c8f0be33e15e5db0b7 + pullPolicy: IfNotPresent + commonAnnotations: + 
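+      # Watched by stakater/reloader: pods are recycled whenever the
+      # vaultwarden-secret rendered above changes.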
reloader.stakater.com/auto: "true" + initContainers: + - name: init-db + image: ghcr.io/onedr0p/postgres-init:16.4@sha256:e41c745b54485341e00efbd27556f0717623a119f0d5107e5ff831aa1322c76f + imagePullPolicy: IfNotPresent + envFrom: + - secretRef: + name: vaultwarden-secret + podSecurityContext: + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsGroup: 1001 + runAsUser: 1001 + capabilities: + drop: + - ALL + data: + name: vaultwarden-data + size: 15Gi + path: /data + keepPvc: false + accessMode: ReadWriteOnce + attachments: + name: vaultwarden-files + size: 100Gi + path: /files + keepPvc: false + accessMode: ReadWriteOnce + webVaultEnabled: true + database: + type: postgresql + existingSecret: &secret vaultwarden-secret + existingSecretKey: VAULTWARDEN_DATABASE_URI + eventsDayRetain: 14 + domain: https://vaultwarden.${PUBLIC_DOMAIN} + sendsAllowed: true + trashAutoDeleteDays: 14 + signupsAllowed: true + signupsVerify: true + signupDomains: ${PUBLIC_DOMAIN} + invitationsAllowed: true + invitationOrgName: Vaultwarden + ipHeader: X-Real-IP + iconBlacklistNonGlobalIps: true + requireDeviceEmail: true + adminToken: + existingSecret: *secret + existingSecretKey: VAULTWARDEN_ADMIN_TOKEN + value: {} + timeZone: Europe/Sofia + smtp: + existingSecret: *secret + host: smtp-relay.brevo.com + security: starttls + port: 587 + fromName: DarkfellaNET + username: + existingSecretKey: SMTP_USERNAME + password: + existingSecretKey: SMTP_VAULTWARDEN_PASS + authMechanism: Login + ingress: + enabled: true + class: external + nginxIngressAnnotations: true + additionalAnnotations: + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "access-control-allow-origin https://vaultwarden.${PUBLIC_DOMAIN}"; + external-dns.alpha.kubernetes.io/target: external.${PUBLIC_DOMAIN} + nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" + nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" + nginx.ingress.kubernetes.io/auth-snippet: | + # Bypass authentication for specific paths + if ($request_uri ~* "^/$") { + return 200; + } + if ($request_uri ~* "^/.*\.js") { + return 200; + } + if ($request_uri ~* "^/#/.*") { + return 200; + } + if ($request_uri ~* "^/#/login") { + return 200; + } + if ($request_uri ~* "^/#/2fa") { + return 200; + } + if ($request_uri ~* "^/#/vault") { + return 200; + } + if ($request_uri ~* "^/api/.*") { + return 200; + } + if ($request_uri ~* "^/images/.*") { + return 200; + } + if ($request_uri ~* "^/identity/.*") { + return 200; + } + if ($request_uri ~* "^/app/.*") { + return 200; + } + if ($request_uri ~* "^/locales/.*") { + return 200; + } + if ($request_uri ~* "^/alive") { + return 200; + } + hostname: vaultwarden.${PUBLIC_DOMAIN} + path: / + pathType: Prefix diff --git a/kubernetes/main/apps/vaultwarden/vaultwarden/app/kustomization.yaml b/kubernetes/main/apps/vaultwarden/vaultwarden/app/kustomization.yaml new file mode 100644 index 000000000..a2c7d946b --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/vaultwarden/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./externalsecret.yaml + - ./helmrelease.yaml + - ./resources/gatus-ep.yaml diff --git a/kubernetes/main/apps/vaultwarden/vaultwarden/app/resources/gatus-ep.yaml b/kubernetes/main/apps/vaultwarden/vaultwarden/app/resources/gatus-ep.yaml new file mode 100644 index 000000000..affe35b00 --- /dev/null +++ 
b/kubernetes/main/apps/vaultwarden/vaultwarden/app/resources/gatus-ep.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "vaultwarden-gatus-ep" + labels: + gatus.io/enabled: "true" +data: + config.yaml: | + endpoints: + - name: "vaultwarden" + group: external + url: "https://vaultwarden.${PUBLIC_DOMAIN}/alive" + interval: 1m + client: + dns-resolver: tcp://1.1.1.1:53 + conditions: + - "[STATUS] == 200" + alerts: + - type: pushover diff --git a/kubernetes/main/apps/vaultwarden/vaultwarden/ks.yaml b/kubernetes/main/apps/vaultwarden/vaultwarden/ks.yaml new file mode 100644 index 000000000..c7b4197be --- /dev/null +++ b/kubernetes/main/apps/vaultwarden/vaultwarden/ks.yaml @@ -0,0 +1,27 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app vaultwarden + namespace: flux-system +spec: + dependsOn: + - name: external-secrets-stores + - name: cloudnative-pg-cluster + targetNamespace: vaultwarden + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/vaultwarden/vaultwarden/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m + postBuild: + substitute: + APP: *app + GATUS_PATH: /alive diff --git a/kubernetes/main/apps/volsync-system/kustomization.yaml b/kubernetes/main/apps/volsync-system/kustomization.yaml new file mode 100644 index 000000000..5b5a44ea5 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/kustomization.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # Pre Flux-Kustomizations + - ./namespace.yaml + # Flux-Kustomizations + - ./snapshot-controller/ks.yaml + - ./volsync/ks.yaml diff --git a/kubernetes/main/apps/volsync-system/namespace.yaml b/kubernetes/main/apps/volsync-system/namespace.yaml new file mode 100644 index 000000000..082ad6dac --- /dev/null +++ b/kubernetes/main/apps/volsync-system/namespace.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: volsync-system + annotations: + kustomize.toolkit.fluxcd.io/prune: disabled + volsync.backube/privileged-movers: "true" +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/provider_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Provider +metadata: + name: alert-manager + namespace: volsync-system +spec: + type: alertmanager + address: http://alertmanager-operated.observability.svc.cluster.local:9093/api/v2/alerts/ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/notification.toolkit.fluxcd.io/alert_v1beta3.json +apiVersion: notification.toolkit.fluxcd.io/v1beta3 +kind: Alert +metadata: + name: alert-manager + namespace: volsync-system +spec: + providerRef: + name: alert-manager + eventSeverity: error + eventSources: + - kind: HelmRelease + name: "*" + exclusionList: + - "error.*lookup github\\.com" + - "error.*lookup raw\\.githubusercontent\\.com" + - "dial.*tcp.*timeout" + - "waiting.*socket" + suspend: false diff --git a/kubernetes/main/apps/volsync-system/snapshot-controller/app/helmrelease.yaml b/kubernetes/main/apps/volsync-system/snapshot-controller/app/helmrelease.yaml new file mode 100644 index 000000000..1ed4f8a22 --- /dev/null +++ 
b/kubernetes/main/apps/volsync-system/snapshot-controller/app/helmrelease.yaml @@ -0,0 +1,32 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: snapshot-controller +spec: + interval: 30m + chart: + spec: + chart: snapshot-controller + version: 3.0.6 + sourceRef: + kind: HelmRepository + name: piraeus + namespace: flux-system + install: + crds: CreateReplace + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + crds: CreateReplace + remediation: + strategy: rollback + retries: 3 + values: + controller: + serviceMonitor: + create: true + webhook: + enabled: true diff --git a/kubernetes/main/apps/volsync-system/snapshot-controller/app/kustomization.yaml b/kubernetes/main/apps/volsync-system/snapshot-controller/app/kustomization.yaml new file mode 100644 index 000000000..17cbc72b2 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/snapshot-controller/app/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/kubernetes/main/apps/volsync-system/snapshot-controller/config/kustomization.yaml b/kubernetes/main/apps/volsync-system/snapshot-controller/config/kustomization.yaml new file mode 100644 index 000000000..4a74bb121 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/snapshot-controller/config/kustomization.yaml @@ -0,0 +1,6 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./snapshotclass.yaml diff --git a/kubernetes/main/apps/volsync-system/snapshot-controller/config/snapshotclass.yaml b/kubernetes/main/apps/volsync-system/snapshot-controller/config/snapshotclass.yaml new file mode 100644 index 000000000..1490d7355 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/snapshot-controller/config/snapshotclass.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshotClass +metadata: + name: topolvm-provisioner-thin + annotations: + snapshot.storage.kubernetes.io/is-default-class: "true" +driver: topolvm.io +deletionPolicy: Delete diff --git a/kubernetes/main/apps/volsync-system/snapshot-controller/ks.yaml b/kubernetes/main/apps/volsync-system/snapshot-controller/ks.yaml new file mode 100644 index 000000000..ab96ebbb1 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/snapshot-controller/ks.yaml @@ -0,0 +1,42 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app snapshot-controller + namespace: flux-system +spec: + targetNamespace: volsync-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/volsync-system/snapshot-controller/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app snapshot-controller-config + namespace: flux-system +spec: + dependsOn: + - name: snapshot-controller + targetNamespace: volsync-system + commonMetadata: + 
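+    # Applied only after the snapshot-controller release is healthy
+    # (dependsOn above), so the VolumeSnapshotClass CRD already exists.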
labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/volsync-system/snapshot-controller/config + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: true + interval: 30m + timeout: 5m diff --git a/kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml b/kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml new file mode 100644 index 000000000..cfb9c7c4d --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/app/helmrelease.yaml @@ -0,0 +1,31 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: volsync +spec: + interval: 30m + chart: + spec: + chart: volsync + version: 0.11.0 + sourceRef: + kind: HelmRepository + name: backube + namespace: flux-system + install: + remediation: + retries: 3 + upgrade: + cleanupOnFail: true + remediation: + strategy: rollback + retries: 3 + dependsOn: + - name: snapshot-controller + namespace: volsync-system + values: + manageCRDs: true + metrics: + disableAuth: true diff --git a/kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml b/kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml new file mode 100644 index 000000000..5e0988437 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/app/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./prometheusrule.yaml diff --git a/kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml b/kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml new file mode 100644 index 000000000..880d67388 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/app/prometheusrule.yaml @@ -0,0 +1,28 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: volsync +spec: + groups: + - name: volsync.rules + rules: + - alert: VolSyncComponentAbsent + annotations: + summary: VolSync component has disappeared from Prometheus target discovery. + expr: | + absent(up{job="volsync-metrics"}) + for: 15m + labels: + severity: critical + - alert: VolSyncVolumeOutOfSync + annotations: + summary: >- + {{ $labels.obj_namespace }}/{{ $labels.obj_name }} volume + is out of sync. 
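+          # As the metric name suggests, a value of 1 indicates the
+          # replicated volume has fallen behind its schedule.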
+ expr: | + volsync_volume_out_of_sync == 1 + for: 15m + labels: + severity: critical diff --git a/kubernetes/main/apps/volsync-system/volsync/ks.yaml b/kubernetes/main/apps/volsync-system/volsync/ks.yaml new file mode 100644 index 000000000..ce64ba842 --- /dev/null +++ b/kubernetes/main/apps/volsync-system/volsync/ks.yaml @@ -0,0 +1,22 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: &app volsync + namespace: flux-system +spec: + dependsOn: + - name: snapshot-controller-config + targetNamespace: volsync-system + commonMetadata: + labels: + app.kubernetes.io/name: *app + path: ./kubernetes/main/apps/volsync-system/volsync/app + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + wait: false + interval: 30m + timeout: 5m diff --git a/kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml b/kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml new file mode 100644 index 000000000..fe6f113e7 --- /dev/null +++ b/kubernetes/main/bootstrap/flux/age-key.secret.sops.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Secret +metadata: + name: sops-age + namespace: flux-system +stringData: + age.agekey: ENC[AES256_GCM,data:hHup+dFxDjW9ci4cKhkxl4DSfIT7CK08HgfeQ+lce5SeeIn6unWM2LXcIA5mkwZGoLOzngAKopwt5aeThWvsLy66Q/XFp0mDHus=,iv:U4CxsaosLMXFngGGWSnutE2GHiUWKxkBDho10AtGsFQ=,tag:3PBqK9ZCbvB89b3rbgv3qw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBuYTBhNTk0Ni9QMHEyYldK + cW1zUXV2c0kzNXovMjVxU2ZGWVJLcTJtYWpzCjMxWTk4UnluYVA2VGJNZXdzcjVS + OVZCVXJrWVI1aUhDY0M3YW16Y21qQUUKLS0tIHZaTXBldGNYWVkyemlWOTJQaEdw + azE3ZDJMZW5vdkZaUXBkNTN5MFhGS00KUYM+zFG0a0sej18dQCAS6AkCVd8FrHZp + Lq99sdhQ86xPTIPq2MzxATsPKWM3qOiVUG3lehlF7WKe0YTAAHC3/w== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-10-08T21:48:12Z" + mac: ENC[AES256_GCM,data:1jN/pAPbxS/dnA19zPsh3qwFkmS3ve93LJZnOjkhKL//+ohDN6OquhE9xwYjq8/35xJERot68uB3iJ7bu2bJJoLGZHyDtvBuKANwrorSo346g9VDa2rpNx8GhsaoYtpz5fyasmZhAnh5f9ss27DpSSh9b6kp+ToNAWnFXbA/wQg=,iv:3HEC9jObBuaJNMj8dywsX+luIhWi2PbrLgCWpPyCZwQ=,tag:s453WKwqB1QvgF2Yfz5qzw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + mac_only_encrypted: true + version: 3.9.1 diff --git a/kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml b/kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml new file mode 100644 index 000000000..5a72656c3 --- /dev/null +++ b/kubernetes/main/bootstrap/flux/deploy-key.secret.sops.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Secret +metadata: + name: deploy-key + namespace: flux-system +stringData: + identity: ENC[AES256_GCM,data:IBYRcWjfwODirFE/YQMTQ0LHaKr9KIXYw3CD6TsKMJ6wIASDV3offgQiykho3XLEdnSBfXWmD7Zs2ooRihK9gVuqs1TkVlcv4E0rVP3Ytp5a25SP6Dd5WcU5m4lXdWBPCG92cU6cNzrdJ7JJT34Ohqg53eaokC5szGQcBT2dpbQCHfekDqNHZ4f6ZZKVdpyHVPRxwDxTqxQNOrDdpd2GdKUXRZd3v8lqGEIWnlys4WelUq4jTaZ8fjGVCpag6zSMtfR6WFHDJ8P7FH2M7li4V2ff+uzkhXgxEz6VgtyFBAq77IAz+jnf6lB0w1+Y0J7ws8LmYK6whWXBkpmNfDs/ehhqTLpCfbDg4sm864k6AaHYKXSGNKMRjNaq+/zUP0xqGc9iYpv++IXyvAHMxVx+CrMNetesXG/SPPm5jpHJILAL0RWxoCh/0fb8uQDxXFy1nk4haY6N2O9zhqi/ofm18RDmBGvkEheeMGDDv2ZSNbqoG2peXBhXJTyWZiuA0Ox/,iv:3rTgh86myohUye3x7r6WFb0gGx+CWP7y8Rzmbj+RVwg=,tag:oKXo8MMwDSQhUbogs6r2Sw==,type:str] + identity.pub: 
ENC[AES256_GCM,data:6fvhqnzxmGMa2KmuBW+ge6uPUqmJ9v92dIHEgK/jPQjQUbmyWQ+Nh2Azh5VGdiXyFFT085Zmd+KSXI4FelQT3tKZ1YiOJ79loyY/vcP9MVBhbVwYkdmDMFwmSHWB9bnAlC8QMhDNprRDqMYc8GE8IjC/qQfCz0EWuelFvfHgafnGTN1zQNLiDK5YGYG9fsZoSQceOcbwF3Xc2aWRYa812+rQcNxv5dGZsFCY7AxIs13QsfFoSKX945ildls7as0Lsc/UJjrbTq93YOgx2b2zIt7RUWVaKLKoY33vlr5LhhYVRjf+7OVRERk8rgHp90pPvcHg6P1IxX6YdRE31w==,iv:OrPRv3Y7rX9tx/vMrt6MUh71gwWDAvLm7fsGt++v99M=,tag:SQeHvymWVofWDR2vWh3QXw==,type:str] + known_hosts: ENC[AES256_GCM,data:mJ+w+UCm6vGNgwSty0Rnt55f0WYXvWSA47RHEMKJypH9AOvG8seWaHmPsYsaNT19gLqTb7m+R41Ppr13SZdqJK02vqbbR1bvPZBhMEEf2JY15XfqEvFPNColNLcpRCbEScU7tItoXmJ2KRNqZOKT6laRoyDemxnpToxU5v2jX4zmryAvCSap3s6s6eZ2rCOzVJwfXopxyfttDYS+RCj4kxKrqCFFd3PkGvKn,iv:580mGiQNGqQo068EBBsu3wO6FHnFCaoT+XR4TOwmRGE=,tag:S3KE2HiAfCzF7rfW0x/31g==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAwUlp6Q29OREFPRmZUNmRh + TGN6ZUpnM2NwVXpPbDZSQkFSQjJxNU9IcGdjCkdmSzNzQUFBVXJGWEo5Rm9jaGZt + UCtKcXFQaFdQaXQ3UGxjaWRMS0N3VVkKLS0tIEVMUkVlTVArOXBqTTdJTmVsSzVB + QnYvYUpxWDMweWFKOFQyYk15QzdQZlkKKqlwfunxvU43OeNEQA9qm8w9GNzPywde + C732F7XEYZls8rYpmZdxpIA+x4Yjsl9PpXDuwjBfKwX+AXj32NRRog== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-10-08T20:40:20Z" + mac: ENC[AES256_GCM,data:YmHPvl10nOr3fXtJjcL7jaZgxbk6rmSTVPyvdxjUw49eqUnrudO6WfuQVg6+TIWH2npPxfrJxwYT6iQ7bAJd9+2iGFMVUSed6YyV/QSSf2kzXg4WqUA/SyoHNLKZmdgTxIyZkKbFA3ymsFH6qsbV0ulfO2p/I3Z77/qVYvLYb6Y=,iv:6T5rFR+HoY6VsRbLgk33+P5VIBS/nfvSGfCJ+k2NymE=,tag:zxVI0nmbsvq24DJkFAnMyw==,type:str] + pgp: [] + encrypted_regex: ^(data|stringData)$ + mac_only_encrypted: true + version: 3.9.1 diff --git a/kubernetes/main/bootstrap/flux/kustomization.yaml b/kubernetes/main/bootstrap/flux/kustomization.yaml new file mode 100644 index 000000000..744ef2c61 --- /dev/null +++ b/kubernetes/main/bootstrap/flux/kustomization.yaml @@ -0,0 +1,62 @@ +# IMPORTANT: This file is not tracked by flux and should never be. Its +# purpose is to only install the Flux components & CRDs into your cluster. 
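+#
+# One possible one-time bootstrap invocation (illustrative only; local
+# bootstrap tooling may wrap this differently):
+#   kubectl apply --server-side --kustomize kubernetes/main/bootstrap/flux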
+--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - github.com/fluxcd/flux2/manifests/install?ref=v2.4.0 +patches: + # Remove the built-in network policies + - target: + group: networking.k8s.io + kind: NetworkPolicy + patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used + # Resources renamed to match those installed by oci://ghcr.io/fluxcd/flux-manifests + - target: + kind: ResourceQuota + name: critical-pods + patch: | + - op: replace + path: /metadata/name + value: critical-pods-flux-system + - target: + kind: ClusterRoleBinding + name: cluster-reconciler + patch: | + - op: replace + path: /metadata/name + value: cluster-reconciler-flux-system + - target: + kind: ClusterRoleBinding + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: crd-controller + patch: | + - op: replace + path: /metadata/name + value: crd-controller-flux-system + - target: + kind: ClusterRole + name: flux-edit + patch: | + - op: replace + path: /metadata/name + value: flux-edit-flux-system + - target: + kind: ClusterRole + name: flux-view + patch: | + - op: replace + path: /metadata/name + value: flux-view-flux-system diff --git a/kubernetes/main/bootstrap/helmfile.yaml b/kubernetes/main/bootstrap/helmfile.yaml new file mode 100644 index 000000000..52b56318f --- /dev/null +++ b/kubernetes/main/bootstrap/helmfile.yaml @@ -0,0 +1,90 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/helmfile +helmDefaults: + wait: true + waitForJobs: true + timeout: 600 + force: true + recreatePods: true + +repositories: + - name: cilium + url: https://helm.cilium.io + - name: coredns + url: https://coredns.github.io/helm + - name: postfinance + url: https://postfinance.github.io/kubelet-csr-approver + - name: topolvm + url: https://topolvm.github.io/topolvm/ + - name: cert-manager + url: https://charts.jetstack.io + - name: prometheus-community + url: https://prometheus-community.github.io/helm-charts + +releases: + - name: prometheus-operator-crds + namespace: observability + chart: prometheus-community/prometheus-operator-crds + version: 16.0.0 + - name: cilium + namespace: kube-system + chart: cilium/cilium + version: 1.16.3 + values: + - ../apps/kube-system/cilium/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - name: coredns + namespace: kube-system + chart: coredns/coredns + version: 1.36.1 + values: + - ../apps/kube-system/coredns/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - name: kubelet-csr-approver + namespace: kube-system + chart: postfinance/kubelet-csr-approver + version: 1.2.3 + values: + - ../apps/kube-system/kubelet-csr-approver/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - name: lvm-disk-watcher + namespace: kube-system + chart: oci://tccr.io/truecharts/lvm-disk-watcher + version: 2.0.4 + values: + - ../apps/kube-system/lvm-disk-watcher/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - kube-system/kubelet-csr-approver + - name: cert-manager + namespace: cert-manager + chart: cert-manager/cert-manager + version: 1.16.1 + values: + - ../apps/cert-manager/cert-manager/app/helm-values.yaml + needs: + - 
observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - kube-system/kubelet-csr-approver + - name: topolvm + namespace: kube-system + chart: topolvm/topolvm + version: 15.5.0 + values: + - ../apps/kube-system/topolvm/app/helm-values.yaml + needs: + - observability/prometheus-operator-crds + - kube-system/cilium + - kube-system/coredns + - kube-system/kubelet-csr-approver + - kube-system/lvm-disk-watcher + - cert-manager/cert-manager diff --git a/kubernetes/main/bootstrap/talos/assets/k8s-0.secret.sops.yaml b/kubernetes/main/bootstrap/talos/assets/k8s-0.secret.sops.yaml new file mode 100644 index 000000000..8c5c89d5d --- /dev/null +++ b/kubernetes/main/bootstrap/talos/assets/k8s-0.secret.sops.yaml @@ -0,0 +1,190 @@ +version: v1alpha1 +debug: false +persist: true +machine: + systemDiskEncryption: + ephemeral: + provider: luks2 + keys: + - slot: 0 + tpm: {} + state: + provider: luks2 + keys: + - slot: 0 + tpm: {} + type: controlplane + token: ENC[AES256_GCM,data:3wvhxsDOOI+INSWJmil4ayFtR9khwVY=,iv:FxtvDHzLcjHaCGnnngrmr+dN6S2rxeHP5sUA2G8vcr4=,tag:tDY11lF5uiLmkjFfAVZacQ==,type:str] + ca: + crt: ENC[AES256_GCM,data:4XG0t9H16Lq0UBZu6gwAx5k5ciOHKGk9h1Yp8m9CXy+iMBGWutnIdOI1aomYVgO+cggcEhfvypKKwWcUnLKrKpcGNlfJIVGwzKwdYdtaoIJxsViR7wePRl2x8rkS7hWjFoEHA2IeF4tVJcJcG7xAMnq0q+PY1oMjAkKcphIkf3r/G99FXDqQto4IZJJfXYm7XHjurnVy9nnYJmIYfwa72LkuDWJzonS5Hh0x4x7EZHWmhQcZfhxoF3/BsFzLBSodF1YkjzxiASYAsh4gqOaL0QvoX8CXPO6ey3CKN2uYmenZnlSKIg9HaIoH8E02QmJRZ9VCZMS+d9D+jYG0VghWF/pCSxZvWW7nLIX3zCi9jrGUaysjZiWND7TUA31ySL3j/CTApPxRA53M0iJHW4kNVSvG1gDFlpnw/YsVgD7GeH590Lby8xfWrsvsL+nN1gdC8Sr2YAUyK9B08gko/Ywx0JX3ljtN6s//MG6ul/w8LRgmDhcQZXdNRTZpET2vHfIKXmRsvyirlENRORHP750G/iXccrP08pMYTB8VDGrFIwJpCcinHARrs9YnbolIW2ZbkqMC+cLTlxok6c9dWEstRVDjTH5b+P54tKKVasjRasBNKlw1jnvDHGRCn9IA7zIiXfMfh5mLUVpoZnu4wMuJnP2Gr0hYwt2PbUjUkqs7BQzher3EYI04pZLRjXdeqm4MnN0PqS3TxMLJArQpr4WKg9MycigWxtxw52oA7EVlpSKBQ6YEXeh7J9V8CtCu2O23iRGe6W4BEWUa9Cbx9g6IDwZ8FJz9EC6JmH8i1SJ8b2ttDjMnDQJ3ajiIk4D1yaks075pUIIQ4JHHy0TtIXHNDyzDyScDmlcSKs3sOOQ0DHBQGfdr,iv:fhTgor01l/2E1tilruLATvsDWREzlY85AVzrZFSCYVE=,tag:tIsycNrDPkfwazI6kySyRA==,type:str] + key: ENC[AES256_GCM,data:VHHoj893sjbTKD6ReB52VsTHSGWbvSFL10fl3zZ2CItGC20D0Ix48jmqpp+gfUMtZT91qs00MRDMJSd/5vmtn7cO5VoGlh8qkPPNBPCRdOYGSkKGZYGFlg8CUXkI02lErynl/gLNvbk3UY3ZAtPJS7V2rUsBLCNmaSWdUXRk42+N1qpjRKiilG4GybWeCbMRRlBiCXMexDuFyje6XMtmDkUtUABmMMyN4TwTe0l8PhFnep8R,iv:6GEBAVJu9mxykRMStEMaQfI7HDDTImT+MLUFtCYtf64=,tag:RnfLQUolPW1S15he7xpb3Q==,type:str] + certSANs: + - 127.0.0.1 + - 192.168.91.10 + kubelet: + image: ghcr.io/siderolabs/kubelet:${KUBERNETES_VERSION} + extraArgs: + rotate-server-certificates: "true" + extraConfig: + maxPods: 250 + shutdownGracePeriod: 15s + shutdownGracePeriodCriticalPods: 10s + defaultRuntimeSeccompProfileEnabled: true + disableManifestsDirectory: true + network: + hostname: k8s-0 + interfaces: + - interface: eth0 + dhcp: true + vip: + ip: 192.168.91.10 + install: + diskSelector: + size: <= 300GB + extraKernelArgs: + - net.ifnames=0 + - security=apparmor + image: factory.talos.dev/installer-secureboot/${TALOS_SCHEMATIC_ID}:${TALOS_VERSION} + wipe: false + files: + - op: create + path: /etc/cri/conf.d/20-customization.part + content: | + [plugins."io.containerd.cri.v1.runtime".containerd] + default_runtime_name = "nvidia" + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + 
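+        # Keeping unpacked layers on disk trades space for reuse; presumably
+        # set so images can be read back out of containerd by other tooling.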
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + - op: overwrite + path: /etc/nfsmount.conf + permissions: 420 + content: | + [ NFSMount_Global_Options ] + nfsvers=4.2 + hard=True + noatime=True + rsize=131072 + wsize=131072 + nconnect=8 + time: + disabled: false + servers: + - 192.168.91.11 + sysctls: + fs.inotify.max_user_watches: 1048576 + fs.inotify.max_user_instances: 8192 + net.core.netdev_max_backlog: 30000 + net.core.rmem_max: 67108864 + net.core.wmem_max: 67108864 + net.ipv4.tcp_rmem: 4096 87380 33554432 + net.ipv4.tcp_wmem: 4096 65536 33554432 + net.ipv4.tcp_tw_reuse: 1 + net.ipv4.tcp_window_scaling: 1 + net.core.bpf_jit_harden: 1 + features: + rbac: true + stableHostname: true + kubernetesTalosAPIAccess: + enabled: true + allowedRoles: + - os:admin + allowedKubernetesNamespaces: + - system-upgrade + apidCheckExtKeyUsage: true + diskQuotaSupport: true + kubePrism: + enabled: true + port: 7445 + hostDNS: + enabled: true + forwardKubeDNSToHost: false + resolveMemberNames: false + kernel: + modules: + - name: dm_thin_pool + - name: dm_mod + - name: nvidia + - name: nvidia_uvm + - name: nvidia_drm + - name: nvidia_modeset + nodeLabels: + node.kubernetes.io/exclude-from-external-load-balancers: "" +cluster: + id: ENC[AES256_GCM,data:xVRokU1BOy7gH/z2vARbkUFl2XLVohlqwJjlCf3ITKshpG+Twi1FClA7dyg=,iv:70nri+j0isU6wLJV+IqAq5oR0Vg6/uL70fANWasCfX4=,tag:xuuUqKxWs8lImZJ/M7izhw==,type:str] + secret: ENC[AES256_GCM,data:f5xTqaU81tWcVjxaBhTtN4h3OehgqAbNcpjCStvp5M6ZnQ0shsA2SaEHvDE=,iv:wW+Nvl/TsJgxVrAFgv8gE7u+pJw24vPeGHES2pfK8TQ=,tag:TPH1uKo2wDPrsivcOp6cgA==,type:str] + controlPlane: + endpoint: https://192.168.91.10:6443 + clusterName: main + network: + cni: + name: none + dnsDomain: cluster.local + podSubnets: + - 172.16.0.0/16 + serviceSubnets: + - 172.17.0.0/16 + coreDNS: + disabled: true + token: ENC[AES256_GCM,data:+oPjJaHHd5PbixAO/cJ0dpVs7ccIoec=,iv:Ps0yuCV9n6SAzopMh42McgaY43MHHf/9lkGhS1TRKBc=,tag:kM2pNBT7wizA8MvkLA7Hiw==,type:str] + secretboxEncryptionSecret: ENC[AES256_GCM,data:1yEWFZCxobutBO6TDFYcPBlt07Os7vKx58zVLvM4+vMhGRr7cBvcPKttlHE=,iv:u9hyNzCBjLMwzru/SQGDwFm8CnEQuW/LI6QEPpcw3gk=,tag:QO4VhGYoMSVVT+tbwGsk6Q==,type:str] + ca: + crt: ENC[AES256_GCM,data:mgWX4VRC0XkOBQVQEhLnKbFAfogGmpnWIWb702oGX8+oexcDzQCUzQztRkA8ZtzsexgtG4QdO5NvZfGwWmoHRpjibkU+yfmg/GrNyVHW5Mf+PZAhakGBnFeSCFq2eKBmJ5+DTVhplXJqey1dJqY49QSpiiCxlruGNs1y+u/PbdC/SYYXup3iPMXqMqgBdziKSwl6s8KrDxH1R1CezMT7+Llngwyl56GDiEwD3oMG8fK0Gak6SRGGUYtXjtyrSwrn5lXScPHR2HDMI5ekdWOn6qTTaVu7zcMgTtTOgXiv+JNc/mR/yqiQmV6ciMJHj8WJg8Fh1WhMoTKXJHeaIDKnsNdaNmTXoyq91EyRg5Uq4+FePrqYA+WkK2x94W0D4kMuetFr8nHpifHrSfTgwvYfkpI1CVCzFiQ/8S8k/idftXp/frfSNi+ci4qZlEmF+NOmQr36hhjldpCJ7NOvakO6LLc33YNkEaidMzYhF3T2zl93h9eDtL7AyuqKgqb63NCfywwvCz5U8M4yzl7tcwr0vJ7FbwZDF3hJxPgRES0QizC9OxhmAag6YXC4yj58EZaozpQ5MevqwnEEppntU/UIanvgKAagUG94Vszrr4d0TuNkh526mglAvLxreXW3r9EaBcsS9yLwdEHLfekwRO1HPSUrgBSCvvVaqUMT/eQY3fLskOgIDuOOEIfVXNlu+nWTpw5YsPnwRA2mxeMKZ6zOijDPsrUVkf8WIDG8MZO0yePJzQ63aDKU+V9GO1QhaX3Uy3dm3M/9ONPythrh/iEO5sGRVYAxpJFVkiYp45zGPiidIzgaBgAwy/an/aEzSfIZh633vazgUv6TpbCVfxQUhlEvWmaGo6DvnOwg/VUXsg5iZE3RmpPm0zh2vu4PcC96mlEAqS0kfufBVK36o1KcaDevtVHVjY+7kiqp/GlB3nQIq7ddoyFR0YNAG3NzQGB0ewLf7ezPFJuPAqXM4Hzv1/6y8vHcXfZ67eHJ+Y+GtvZblOzSOFpzgYcRmZL4kxCOydwZVlHTguXUX5gGyZ2SNjFpKrcRG1Ff6SvsBA==,iv:LPHpagKOrdNoxyvCBm33JIXjFR4Ml2MYE7/YnICMUT4=,tag:nyuuPttF1ix+UBQDU6dO2w==,type:str] + key: 
ENC[AES256_GCM,data:y0ABuqx9Yg41871VrDuSrkblFfFKlvR4XXc3Xy13Vif7Weob7JyOg2EuOQB3NeHNspsPmdALd3QacnmqWBoDmdBJSR13yMdlE+7if0NHJzrVRgyDTIuV2vuVozeQuzyH7F+d4Pk5AWJVDeHC6kfN052I2IlslykMGJ73ceqNdUtOVGVTsntF0hlgjf+MX+l92/iiSbI078VDPFORD6Y6WHBKshSTPgypg37SYHOkOelwyx9YCas+O196TRSrJf8edXnsdjjW6yleSLVTRDw9Q/hDSa3KmAi23S9mzmDrgsxXVm9d9v4e26xzpf3P61YlpMWsBC2cSu78FppWN8P2xK/tA2/m8HFXNly6w/8DpKUc9swkThcafkpNmJ/EYbebtHf0xAHzUfQv1+9uOVUZgQ==,iv:M1ziO1JmwYR9cpnPR05F7Mv3WF5voIpFDQXCXVUgrT0=,tag:Wu7Heuf3pL3Z0twtiL5S8g==,type:str] + aggregatorCA: + crt: ENC[AES256_GCM,data:HKKpRS6RnXq0WHKNisIf8W0zsDFqXVsDi1exjeZ8oI7pKWS64He8+oQCaDBqi3mAUt/KveDAvqzn1c82OiNr08Ps1LsVpHHVlTLOi+jl0icqBiNrcRBI1z9oiIMQ3rn5FhkF2JKLuDisBc41jbv/QOvObJWXLC6N+eUqGJNq7To/DGooPDG1SEaLmNn8ztl49qcAdlmKwaMZo4TD22VmjZ51uPjkPxgJ1Nvowk/dgBrnD+CPboKmLag/GHR8TfnfBeSZ4Iw7BnKQoLlIBuIu6ZyILzfg69RNzbGPB/Lyh8C5EcFJnNRtp0QLKmy6bSNQ/On2r2v3ymB2KoFLi3Wh/AZDI7N5kOHUv7yCSFiK+SWRbyoiJ74t8nIBA3mQIWyvznGkuU+9y7Ah0Fz+QK5fcbxiB/nsyorfVeo8aDk6hPMBHRQPBLy3erACTfkYgiU0UtehVxETcHU5CgKpxYAcMNuYmqyNFhKTRIFFN4xOeh1KK7EqXTljzR7B4CtmKGHdlyCHMUd0EVZb51U/onWb+YC8rvLWTr8V1HVvs86kUxInVUdCSi4i/RaKtoYya8NZbqlbUxH7xDvu90z0gPEarALfXJ4fq3kR4WgMqQq20Rv/ByKQs55iImSxFPwb1DlNd0YL4Bf/ekiV9RWb+94uK37805e5hkavtkCYCUrLOtP3NDtaZhpQAcniqs+vRswHMqxnhwAhwUs2rrc7+mB+TMja1FVWwCJ+tmc8CuSQtucKZOxWBg4DiwdhJO6yvxjhGLOGLvQIBgoArG6QQ41nDnTDHAVTAsZRZo/GdYj5whWDwxUUnsXB50u4c2PD8pqu8zUw3EOdSRGbqyh1DVcK5P4Sx9UaldF0l+manNCrVQMBkeB64+cYddZ402YfxK9ODJQdV3UQ6lKXHpNXt3G2LydgLjW/qNdgakNGWj7cqQv9lkBOSY1BUVgGE/mEeZv4,iv:OZRtFmIgIWudtR90pr7E0xpEv2WULle60ZomGwLqqkM=,tag:Hf0tVv++ZWfoO2ha0sxFPg==,type:str] + key: ENC[AES256_GCM,data:7ll8C2CQUX7F3ugGkc9DHAUf6dUy+CpQEJD1ptIXenTTHzx29eXTt0M5nrAck8bHX+a03OQnLLnH2u90vvykYv+xb1ZkNBjTrlaAklVFXhqnHcK2cC+fDGJpZ7tzUMcnpR9lICN4cZt+WAU6Fcxu/aoP8ZUTYfj/7AoOD5mt1gnt/9GKMJNkLnumutPULOxuJMA6Wy0+Y8GlhqgMtnSQBKY+RVHlxta/gxx/trk+TPrv58Iq8/aSVh/U8ZeLo2mHeO6FKn9ti4Dc1DPiKdJluRsv5qQNE25srmJRqgf+jk1ne7LbzklCzPAcdpHJMMTp13EAz2NLZWP3wW0GsrQP3lqpEE2VLCNRsghk7236NsnzXR6C2NZSes4Sy1MgwP/XO55QHpGVBvC+n8948hFJTQ==,iv:kXQtA4wLHU9ON6nggQDCtgYDT3Qah+rviRcHsZjYP/Q=,tag:6qdCaxj3eKVJMXkllGM4bw==,type:str] + serviceAccount: + key: 
ENC[AES256_GCM,data:Ps4upLlUf4TBqi5lMVFpVYfKvMQiL6flF8tUzV9cpAz/axgiiaJ301oIV37xKBqFGCK6FbR0KqF7QSTPgNF6M5tN4mxIlc816Tlec6o58bugt5iswmHrnK1sN2llaic09Hwg0YQ/EuKlZa67zlvYVlmIGbXJ6t/Nj0EjqmyZJgu35rMepM4BPVTwJYCA6GSbkknQUtaJYvEHoVw0tUfEjgWKFoSPlI/d4N8H4N5u6nT2mMkgL2qDfNAjH9Qz7wQ0c5xqVfidCHLQ+4rg4Puxyf1MUqzMqcHcgj8bTyUlIBE0INZMQGe4cKz3AYLWMfM70BIGZHmqOClpSh+RcuCPGzsGUJIzyyBI/uUrWtE5qTboJZrTH2A8Vx3cfF9Q2b1UdpUy9G5pPPGsOXJnZLDzs5sdPd5Od2AlKcE5Y5DEpYOxMgAUnkSroNdDqtqPWdXugGyW65ZGiimVh83FsnnFgD7xjMsNjOdbzfG2zSaUo5T/V9/vNWF8w3Wq1Jn9mvSkOQxznNbmQ9D8NOEZ+RX062XqQCBi1597yjsuK3lWfr3gXjIHYrKouxuGzWH8lgT0i9w6hBb9W/lR9XbQw2oUDxNZisJuSuDK5uJn40e/M8+YyiLgxcpd9az+4hSGJBVeEBH+CujOGHn+WLfykrbek2plOI/vPuS2CyCMmfmJosCEgAD9EaqRUEBsQ9lAkgBGhFWoEeGHlk2bnvSzR4xOBIHESpYQT6erSzTUGOrovWcMZxScN0d+iuYKMkeZlORFCnhhF4XCRaPNfo+l73Qmr2mXN21H0ddDx77tY+lG5Q3+NQC7h3gBLBS8wPlOD0AiWo+vjdwFJWtXaI8NbslWRra95Wr3mylrUkEvIG77UKyBHINzPT/ImomveebvhQnj/1lb+xyBz/Gw+sQaHqCPe7HMp6VICfQmuHRBo2SOYM4SOPRTLky+a0t2oUYnn7qn3RMetovFaPDPI70+FZZBCzJgdis4cUxc5Xv8r+pn9IiSWyarRtJXVxPyusyRzf0brBDkdzIYArvVB2NpfGAmSHjO7GOc/L8YDbpbsCDoWWWBFCKNW9Nj4Ziaw9LyOrgb5RzrY3JYQcFPdPZMDXgw51uEUwdwD4ktp31zilCDn3bmgVSZ+XsscjiWHq0KF2D3l99KA1WaRzo7wLj0nJEWth9dzF9ZLz6gBANG5HiwjjiEbI4G/9aLMEPxJ5+3fApHy0WjxUrCQZ6nej1WTXEZu6gHimHgukXILARJxzcFB46coCPTKS8gxfDoOFui89soPnS4fQVJM0CDeZoCA5BYPBrvI4d5jHdybFRzeGN0ANU9dfV0P9RVtwXfQ9URn6Ss6L2T5wnPcxoNkchPJIPOr2Ut/lo7u7dBquYniA0AvnnKPg7miBdUJvVwRtcvoDuh6lNlpa1EYgSln32mEpNVwUnaujlf9jkCTWJh3Qvv037MwxKnhtTQelkRIGS7UbIXy2XqgNOhtZZdEcQbN43YK6/CB6RaXGIwYqAMlfo6sNgkKg5E48gpSrP4UdsXV0ucLthUUVw0I6Zv3Ol7LjxRhIsoHkkuUb52hHBpxmNgDLKA8YBK4sD7liOYCnEPXzvCBhHhXlgNJwpPP5ZRwwZeOu1NcIdE+mYjnn1IwqJwJ1MmzLfY9LaYEZcPCwDefWCn3E+kTKFHsT7uzF10PfbB8o+ZoN/SU0H79OaFxp3scvNlfFGdDq/6kg0mK5OYesoFx0dGXHQtpdHJ9gypQ1ZOVELL57dxc5iE0o0QcvSAJfuGECr0NSd8n5i0XDs7wyNqdB2TvlD4LWq43ooxiIKgfOXRhz5hZZIFk3Mw/EGqh+s1PV6tT4rk3JmVXRDvmrT6SJhLAe0ycZtU8BIIsbOElGbcRSUHJXh/aAOgDPBc+QmHCaPBXAT0DeNqe81LfBz9h83Wwm+jtDVyDDNCRtcr5HIwTEwfQlce/kG1oESMhEIGSmeasBBrm6NIWt7k4x6ClvFGg8xa0tLnynJKBXdQze+CRSRMySBiLQnt3a+yOWK48bNi3WtzSaz3sxsNN4fMHzcMm1H2UAztanicAYXegapCQE4p+PIT6DZ/dW/aFCJx1c0EFIWqFKlHzs78B7YB586LIVZ0SGOBB/LTZVqE5p23YfMhO9vQIvfvLT0xtvjFZH0Kd+Q0pe8d2Noa17HEH6oChEzHrg6pZtbJXx2UGE9VZ3+2vRxwRUqjJEXItvyJHhlYv7CDDsATWnr4DhtMAkC2M+L0T0y488UtLEihpAfjVxKBOmJ91H384Jh7qoRx/KXB46pND2VvlRBuPQFCJc4pLaJm1nQp5IPLsMo7uPM87PLO62au3bJYt718ld0ikH7gYxa68GO3XxL/ciUOwk+B+zDAZm+mKiOekaO74W/DSQuEQobN0crtBp9V4q+ppaeZwlDgwkMl7rMjmBOiZpeXqIsea9IXtqs7WbZwUUSLDAOoMIo1CrTNon3MNAQ9k+jQyYy/uzRhD6uT44999PrcsDE9/tpiQ+IRXL/5fiflOViQUdpRFYbamnyt7+WuQyf2U3DbYDyqPyxk9wMZTnIj/6lmGaFPPclnf+MseeIJbjodVlyK7S76dl5Letjz0HSNvWFHSga8rZdWLEOBoEnm/MC4vLCKjzLkzLcmDwLYdWJzbwus13+kA0EEpRiw5YxZ11QirQJ9AnccE7FfhRt95mnqW5C0RFcx7/QofoBxcr6ipBYk8/YWwI6hSeCii3KzynzQVHvoK/QKvQiqvBHIuwH04Nrso4BfnXhGJ3iVli8jbRwcaJo9KQ3Ixc7W8sUp07w2+k4yd25jwR5hJfujZn890Z/aMrpOz3JXcPV988/wPhOYIhzH93iO9zPT0g9NVSPqyIk27ifCam9TIVS8izNovxoespTKDxx7S/pWr12H8CBESiDKcYM/blqHK3zxH+Zj2FzVJRmNqNHUI3vuF+9CYNvxn+MXdE/x3NrqOFIFIU3M328d0lOqP8JD2mqgkY6s/9kWULHcaQj+0p0/ZNqALouWrLlWYY1P13/T1pxugOVP5dwK8+c1J3lehU6wwFcCy88O3UM9B/zRna6U95g7VcPoA8a08KQWeEunkACBQgFq10QetfSPzNOE7jKYk22FMT08JLzGCBfmfwijOTfZ6K9SfexDaIRcYAGCs0T63WotW5PJJFcfGtGA/JPPAZIvktCilNfb+eKYOKbna4DceLG9k42DBSY4NJMWpBFP8TXq1DomfvC28k+RcUtnJL056zGCwkGhrWdXRXF9lu7teo9hXUw07CTI7M7hMDotIOzBE53drmT16kGytwoJcYtd98G7+3ZIgkMSAo8ns/fZ32+ME6homgluuNytwR88VGxH9yefHvtfHZbrFQ4lMwpEkJWbjvjnDBJIXoHPp9q63Un036n9aaTEJ3R4eG2MLGlNyoC0c/HkkgZqYGFROnfziCDSVR48p4fEZvld2WkjY7PzmLMhx8k3/FNwmgCdf5PhHM3LyWJkDU/Pk+If82u6TwpOeX7akWLhQqEWsSeH8tR6gje
jklketcH/ZhL3u4ShXVZOxwuvHTpkfLLlrqplWs5qkXlEy2Mxn9HKwOcfH/syNVFuPVA8D9YIyR6WJYBjX9u/pLpJW0WJ3gRDP8Z7TH3sPjIstUYKLKMJJYuRvv35aC0RXksbgPlpvY1NiDkzBM+Y1Ub2RLRoRYp3PkG3TEMOOhUBWAH2/B7a+xNi5ewgYHNGOjP4GHJFONsFLEZPH/RdylGA1TskmywwXmPVy5XJarFwTzmTR9ewuKpjavan6EdlVOtdKnEIIoWPzBn8pX/QVXVr/rp9ZX4gFbrrpq5Ix0VDrsbhcYof9ZWlqFntKn1lDaby9mFk5MwZLAnjqu6yYCCpOcbvs1a/F8B/E/ATBRJ/l3G7JIpYNV9IuMZJEP+JsapPcI02hNcSxzECQ2tvMdKXaffauyUSng2Wt/fYnoijfvdFJG4Km6vaukiM0AHep8n/S6k7800WhA6GIeCVN1GC5okhaAF5TzLpIECJyzqrcz+ZC1GUDxS/oGakoP4Zls0WemN4DduYAL7nywOe3FDOfO/qAZgRUMlv8R6LC6Hhb6366cLvy92ajs5W5k6O+sxWaLKQzMRFSqt06o1L7UPjZ0AzWR97wt6Brbyzw1ENwj/lXzfoNcP8Z45s4Xpy1lnlT7mNC8ndVCnL4hjeA5YOSmVgF7XH5RbESwPqsffTUwQ8T76p5NuKfIU+zQEFtVtgZZXD64i8CjQua888SsoYQa1guVgtlWFDQ9EuD54Alv3tGNTHzUjVWXKFNFUGQvWr2JaJDtFXwF+aYWdvZPHEbj777F8IiOREUHklmbrKwZEq9nzQbN1bJAfolfhHAJCdnOHxBtVsGibr0FjmFH5UaAqFClHcDUE+KOg+jHDyiKY7Nffm/Y/68IfW907ijPtjCNEDM+tCycQO5YZes9+Vb5Tw91JC2KC1Ust96ezixPqdl1Vo/1w4Uezdq9NBTMJ2vH5MHxplaVuC0V7b8zjOTyJedqohMpVyQL63sJny7PBUp4nQhL5lL8VQvNAT0J6qEg6s5ptCwImgf2lPJOpC/jGKrCHkz/r94B+WFonaKLgWsvuA2pyqFUYBn2wSWInO68qVKAJQetZmILhMUkQAq6aGuSEDqsDwyYaocGoqxhZ2QCrMvKxo4g7NxzbBCPOijjWCWh+59TkTUxWH59qx0qqvgKuJIDuwBb+3HnaRLPxDkvpRsEKx1fwRQge+UT1LiiP2Jn8lFUo1X1cHLN/n8ccQwzdgHGCzsuPv8DD057auWyRBFJ/pbK/96o3veYUrikScfRPCG4uMvClTQo6nC/FmvijrMFjcU4qVCQIGpkgSQRTKqEsr42RIa94fdtTGR1BpkGfF/iuH407ffh22L3lEQBw3D73ha5gCfQMD8KjBWE3SZ6zQj5O8FdiJDLQrgrdlnMFPnVIeDux+oZJ60JisaxkUk0pYPNbAlFLXQ07tVzG9g7ku7bKeNlCTRXgX/qmjvd0dgKxHkZm7gMBQZS2mOPSFY1pSd8qTuHEzdVJD07S58m54ELmz3JVdQ4CI033LPi7Y3PZNW6Ptso8/pYmwEwlpnvrfu0PqPgv4HBI1s7UjyOuAhGzKSTCXACp5TOLgrvUDIAv5zqJrh674RsiAYNgRKlhsTXvkw6kiYHlfZfK0AKYvp85UAdFLlj3zGig2i5aFIk/JmP4uNnq9DpL64uB6EUvzkqUfK5ZuLeMIIjkxZ1wWby0Y8lrZ7FYtfL+RiiM2MLotqMF2oiWSfK4uBNwpB1o2QyGKPyqRX98lxpleIzGIT1+ixRbI6iNXRPIp8TkTO+zJdm2obuqndftEBdT6mFlINWPdyGFnOkhLa8XRSlUdA7kuAZrtK/Qf5OCMvH/U8KNd9lZokL+JjCwd+QDTo1yYltiQ/GK9k+D9T0c+ZQfm4KEPhR1P34mEiiFMk6X8N0ghYCMzXH7+AuYJDMoiZ6tAEo6TRo3lQ6F1e4zaqxkoj9dWp2k66QhS98m958VoU+QzQmrujLV2jI39AQtB68VQHRbl78t6MucuWY80/U/lcmsQ4c8ny7u7a7X/ubk6mwItC0uAtMIXRwuibicBNDCK9TX+MN5i0lcDG6iSWDTpq5zS4FwEaxq1H5Iwtz+NUtVQU9R7bhNvRwVYhoT5jTQLgCOFXsHHDiMR0QNdri+upvFVeWp/lntL6K1G/wZ2ZZkNrD/TtvWj7knT29YtwXlM3/4P1+JqsSQNBMLU5Q==,iv:OmwV9ovoMlfweh1Z0MaVUodoWEHeUND+IV5EX5Mv2lE=,tag:s9zslIOfvJBjZXx2ReThrA==,type:str] + apiServer: + image: registry.k8s.io/kube-apiserver:${KUBERNETES_VERSION} + certSANs: + - 127.0.0.1 + - 192.168.91.10 + disablePodSecurityPolicy: true + auditPolicy: + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: Metadata + controllerManager: + image: registry.k8s.io/kube-controller-manager:${KUBERNETES_VERSION} + extraArgs: + bind-address: 0.0.0.0 + proxy: + disabled: true + scheduler: + image: registry.k8s.io/kube-scheduler:${KUBERNETES_VERSION} + extraArgs: + bind-address: 0.0.0.0 + discovery: + enabled: false + etcd: + ca: + crt: 
ENC[AES256_GCM,data:/9O4C07KvzZwtuo4NVGlck0j3p7+wil4py5QtqrpUMMK9k1pOsZpYdNLPligY0NIvmMbWKpH+48Y7MckyDX7qD1O++hV11gzuZrNcBd88nVCIQmyoVyNro7bVRjGHTr8432fbaYuWtEJo7RTOeGqD5xCqW6kFGqQndIifYEFEf/o8mUPUfKObqQkzFnV+fgSU7GQ8ogZ1G822oIuaD58EtE0/TprmL4y0UQqOPU/SxlHd3oT7/SMCunYvofTiogYMhKhJ/OEoP+qbUJ9RhSRRNCWpCv8H2c61/K5Lh5cxJmoKLLnPFu4crEpm5OHGbqxGCmCamThxF14QjhTm1RyCNoQbkYUfjAhdUTjm5My3/Y6XWd41OiUHlLjDRqSRu+MzHejZDdLZS1AJ/ltU7XHkK4sNX9vDjm8FQuG9xBwOM6FE/zm1FxJP5p/C2MvotH85DI1dUKKaQWCgfdUxp/Hl3ir52ajYbrcsFFbxpcMkv/Xx6SkLysf/GRl7K+8n4youCIblBKT0+n9qV+9iZontHynqNZN/6EIi1VdTHWmSPY8AzaWVZj9MupcK6dobcBpoQDut6f9dVCbq5oj094ynJBr9hhPUtR0+xMbiwFs2QqCKqhfKgEqwIz1hzxMWLoCx8mED+/C4q4rgY1Q5MIGH6otL+d0i/Go33Wlw4u9xaLWMN0aaumdof4Ce1eKjh9ljnqZjQZ3VfoCzQnZRhisnnobClPw60/0jzlXnqoMwem5EORH3wFAvG+HFKpkxPI7M0oDzyDkiQdKo4v1GXr0O5zxMZRqo0I5E4W1bgi7MJetefveqrWsbgmBlZP3spfaWBzhUf1oLHslDpy1uiTy6qYQ5O/1LVRl9MCwUx9BEp/LlimdqO1TPbIL/sUkuugFig5z6cVQ1fhiBQiYD+Wk6Fb2OEvJQnXKLjWceL/Rs93P0vqLnnU43uSFw1WFbDbY1/N/dMJaXj2r28664NVzPjedO/KZ2dZ9J664Zb3MNFxEnrTKpDTouBuqzfE8LO+9,iv:EF7O9Ixedc5kbHoGKvtSrM36N9zCeus4hxzecEttgU8=,tag:XyXDcbiiPs++ClPyYeWkRA==,type:str] + key: ENC[AES256_GCM,data:t9IQ36AoJPlAkXrEVj5CfGP8aN9tYkAbvGDgkJdFlkFF/3bnEJcectGv7XubhG5XkoVTQRHCruALTRkKrT1xC2XU/JXzBJZeofw/GD6BPHVY75CQADdVxC30ePw4KnuGft5TFlLSsgkrOZ+/lFJuuZuzk0ViMPuwjYK8HrWNzFCXWeqX02NWg/OKlcmIngEvYidaUwzeu8iqsNOTyFKoX+cODLg4pBBQftosCHYyPBG0Pf/CqJxnkGRRMy5i1Sku8uScFWDS/iNTIlMBua7eaEKUv9i0k0yq++PSqGV4jeQwooAjFQ7FeZ9Cq14M9rGoH+gaoNj1Z/fSgZwStUZw9W0prlqeFFjPA1D6Cyi6ajmuVJ7GB7dSIIjcjeRkvz+pTNBdfN1BpxJjihd4td3/4g==,iv:4dvjoL73YOB+FC7UviBu0hv4lNOv4ig6FuYBFqC2d5Q=,tag:m+fwobe6yuTxdZx66WczWw==,type:str] + allowSchedulingOnControlPlanes: true +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA0NE5BQzl6bnVTN2VkN0tN + V0hSN3RyMWtQZzNZSXlYR3djSUNCUGNQOVdBCnp4WXA5alJyTmNCbHJXSFhxZ0Ri + aEJwY1FDcXpzUDJVMlhISzc5L3dKcnMKLS0tIFAycW4wZmJscTQ5OG44UE9lUE9w + bFJ0ZWVDaE5YWVd5eVo4Mm5JQk5hRTQKqnpvjyeWGlkFaO/2l05VFjDm8D65MZDa + jFUsaTpTJYNxxOjuCQ1mzoaj7xl28m8YfDDXgq9Z5vvQ+HV6Z65bug== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-10-29T22:34:20Z" + mac: ENC[AES256_GCM,data:r7CnCICysoLAnrt410fpKn6d3Tc8AM0C/IosBGCK5Ksds6qCVOTyUUdqwnX1O/M7RtnuTUt/k03P7VSyAinbiXDBVZtm28kxxkfPC80CGeec4sw3qLWwWBx49Sb+7Os+RjS6/pDfhsVXiA1m/8V3rLHupfUdN+wtejFPcP0PPLY=,iv:l/SiwmIF85fHnfBE0k6ptSk2nwMVczNK6S483s8iNA0=,tag:sxUpIorzAntLinVoq2tn0g==,type:str] + pgp: [] + encrypted_regex: ^(token|crt|key|id|secret|secretboxEncryptionSecret|ca|urls|extraManifests)$ + mac_only_encrypted: true + version: 3.9.1 diff --git a/kubernetes/main/bootstrap/talos/patches/all.yaml b/kubernetes/main/bootstrap/talos/patches/all.yaml new file mode 100644 index 000000000..49d77fe4b --- /dev/null +++ b/kubernetes/main/bootstrap/talos/patches/all.yaml @@ -0,0 +1,69 @@ +- op: replace + path: /machine/time + value: + "disabled": false + "servers": + - "192.168.91.11" +- op: add + path: /machine/kubelet/extraArgs + value: + "rotate-server-certificates": true +- op: add + path: /machine/kubelet/extraConfig + value: + "maxPods": 250 + "shutdownGracePeriod": "15s" + "shutdownGracePeriodCriticalPods": "10s" +- op: replace + path: /machine/features/hostDNS + value: + enabled: true + resolveMemberNames: true + forwardKubeDNSToHost: true +- op: add + path: /machine/sysctls + value: + fs.inotify.max_queued_events: "65536" + 
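+      # Stock inotify limits are easily exhausted on nodes where many pods
+      # watch files (log tailers, config reloaders); raising them is a common fix.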
fs.inotify.max_user_instances: "8192" + fs.inotify.max_user_watches: "524288" + net.core.rmem_max: "2500000" + net.core.wmem_max: "2500000" + +## TODO: Figure out how to make these udev rules pass validation checks # - op: add # path: /machine/udev # value: # # Thunderbolt # - ACTION=="add", SUBSYSTEM=="thunderbolt", ATTR{authorized}=="0", ATTR{authorized}="1" # # Intel GPU # - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660" # # Google Coral USB Accelerator # - SUBSYSTEMS=="usb", ATTRS{idVendor}=="1a6e", ATTRS{idProduct}=="089a", GROUP="20", MODE="0660" # - SUBSYSTEMS=="usb", ATTRS{idVendor}=="18d1", ATTRS{idProduct}=="9302", GROUP="20", MODE="0660" + +- op: add + path: /machine/files + value: + - content: | + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + enable_unprivileged_icmp = true + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + discard_unpacked_layers = false + permissions: 0 + path: /etc/cri/conf.d/20-customization.part + op: create + - content: | + [ NFSMount_Global_Options ] + nfsvers=4.2 + hard=True + noatime=True + nodiratime=True + rsize=131072 + wsize=131072 + nconnect=8 + permissions: 420 + path: /etc/nfsmount.conf + op: overwrite diff --git a/kubernetes/main/bootstrap/talos/patches/controlplane.yaml b/kubernetes/main/bootstrap/talos/patches/controlplane.yaml new file mode 100644 index 000000000..a12add706 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/patches/controlplane.yaml @@ -0,0 +1,39 @@ +- op: add + path: /cluster/proxy/extraArgs + value: + "metrics-bind-address": "0.0.0.0:10249" +- op: add + path: /cluster/controllerManager/extraArgs + value: + "bind-address": "0.0.0.0" +- op: add + path: /cluster/scheduler/extraArgs + value: + "bind-address": "0.0.0.0" +- op: replace + path: /cluster/apiServer/admissionControl + value: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1alpha1 + defaults: + audit: restricted + audit-version: latest + enforce: baseline + enforce-version: latest + warn: restricted + warn-version: latest + exemptions: + namespaces: + - kube-system + runtimeClasses: [] + usernames: [] + kind: PodSecurityConfiguration +- op: add + path: /machine/features/kubernetesTalosAPIAccess + value: + enabled: true + allowedRoles: + - os:admin + allowedKubernetesNamespaces: + - system-upgrade diff --git a/kubernetes/main/bootstrap/talos/patches/disable_cni.yaml b/kubernetes/main/bootstrap/talos/patches/disable_cni.yaml new file mode 100644 index 000000000..2b7b65d99 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/patches/disable_cni.yaml @@ -0,0 +1,6 @@ +cluster: + network: + cni: + name: none + proxy: + disabled: true diff --git a/kubernetes/main/bootstrap/talos/patches/disk_encryption.yaml b/kubernetes/main/bootstrap/talos/patches/disk_encryption.yaml new file mode 100644 index 000000000..8ccf23098 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/patches/disk_encryption.yaml @@ -0,0 +1,12 @@ +machine: + systemDiskEncryption: + ephemeral: + provider: luks2 + keys: + - slot: 0 + tpm: {} + state: + provider: luks2 + keys: + - slot: 0 + tpm: {} diff --git a/kubernetes/main/bootstrap/talos/patches/dm_modules.yaml b/kubernetes/main/bootstrap/talos/patches/dm_modules.yaml new file mode 100644 index 000000000..8b3369a4a --- /dev/null +++ b/kubernetes/main/bootstrap/talos/patches/dm_modules.yaml @@ -0,0 +1,6 @@ +- op: add + path: /machine/kernel + value: + modules: + - "name": "dm_thin_pool"
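+      # dm_thin_pool (together with the core dm_mod below) enables LVM thin
+      # provisioning, which the TopoLVM storage layer presumably relies on.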
+ - "name": "dm_mod" diff --git a/kubernetes/main/bootstrap/talos/patches/nvidia.yaml b/kubernetes/main/bootstrap/talos/patches/nvidia.yaml new file mode 100644 index 000000000..548ede335 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/patches/nvidia.yaml @@ -0,0 +1,10 @@ +- op: add + path: /machine/files + value: + - content: | + [plugins] + [plugins."io.containerd.cri.v1.runtime"] + [plugins."io.containerd.cri.v1.runtime".containerd] + default_runtime_name = "nvidia" + path: /etc/cri/conf.d/20-customization.part + op: create diff --git a/kubernetes/main/bootstrap/talos/talconfig.yaml b/kubernetes/main/bootstrap/talos/talconfig.yaml new file mode 100644 index 000000000..09c211997 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/talconfig.yaml @@ -0,0 +1,60 @@ +--- +# yaml-language-server: $schema=https://raw.githubusercontent.com/budimanjojo/talhelper/refs/heads/master/pkg/config/schemas/talconfig.json +clusterName: main +talosVersion: v1.8.1 +kubernetesVersion: v1.31.1 +endpoint: https://192.168.91.21:6443 +allowSchedulingOnControlPlanes: true +additionalMachineCertSans: + - 192.168.91.21 +clusterPodNets: + - 172.16.0.0/16 +clusterSvcNets: + - 172.17.0.0/16 +patches: + - "@./patches/all.yaml" +nodes: + - hostname: k8s-control-1 + ipAddress: 192.168.91.20 + controlPlane: true + nameservers: + - 192.168.91.11 + installDiskSelector: + size: <= 50GB + #machineSpec: + #secureboot: true + #useUKI: true + networkInterfaces: + - interface: eth0 + addresses: + - 192.168.91.20/24 + routes: + - network: 0.0.0.0/0 + gateway: 192.168.91.1 + vip: + ip: 192.168.91.21 +controlPlane: + patches: + - "@./patches/all.yaml" + - "@./patches/controlplane.yaml" + #- "@./patches/disk_encryption.yaml" + - "@./patches/dm_modules.yaml" + - "@./patches/disable_cni.yaml" + schematic: + customization: + extraKernelArgs: + - net.ifnames=0 + - security=apparmor + systemExtensions: + officialExtensions: + - siderolabs/iscsi-tools + - siderolabs/qemu-guest-agent +worker: + patches: + - "@./patches/worker.yaml" + schematic: + customization: + systemExtensions: + officialExtensions: + - siderolabs/iscsi-tools + - siderolabs/qemu-guest-agent diff --git a/kubernetes/main/bootstrap/talos/talsecret.sops.yaml b/kubernetes/main/bootstrap/talos/talsecret.sops.yaml new file mode 100644 index 000000000..14d822fd0 --- /dev/null +++ b/kubernetes/main/bootstrap/talos/talsecret.sops.yaml @@ -0,0 +1,43 @@ +cluster: + id: ENC[AES256_GCM,data:9tR1hqPeTg+NZ9ywuF/f91yRF4eYqbpke0h6VlXF+QEoKSASo0rxbkzsdcA=,iv:X/aTRkyPUBuBV5kgXGWF/Z+vc/xAu1MfTdPZkxYMTrE=,tag:LdF4jREkJ4eFCVp58k4AOg==,type:str] + secret: ENC[AES256_GCM,data:C7N2iQcSHm5AzBvvUEd93xRpHHKx6HGLst8ewozp/8rsLbZ4dVLCzz4PYpY=,iv:R22QYNSBd5WBiAMAA/wD3qMplVEsq0+KjRocdPyqFIU=,tag:GpwP5ExmZpbM3CZaauinyw==,type:str] +secrets: + bootstraptoken: ENC[AES256_GCM,data:hz9sHQrdWob8BcY6tjQhLFJoQ9LLC7k=,iv:mrNLJOHoqI4Bxup1kxCDo5E6978z5sE9+4Y/uD0uCIY=,tag:2fGorSOJGTdaqmGO0mrS3Q==,type:str] + secretboxencryptionsecret: ENC[AES256_GCM,data:mSlNJTz9Uqk1sfk87Dl11UIvv+Ac71FBSw4ALLzd8dmh/X57tcGPBdWte1k=,iv:bUIQ+Stp7DL3afZkkbZGopXggUJTsJaP7bGNvedzngI=,tag:hSVQA6eIBPdq17r1RQ5K1A==,type:str] +trustdinfo: + token: ENC[AES256_GCM,data:Uuw8Dwwoc5pAGu6o6uDaBuWol5ayVKc=,iv:ae7GHaesJVJ6Jtd8qnCBDFDLmED3gZi7XoNl2nizsVE=,tag:7qQ9AP2hkoZ3GUr8JRi04g==,type:str] +certs: + etcd: + crt: 
ENC[AES256_GCM,data:ZKnu8QPRlyiHHdAjTgV6JNQ4f4SxdiOV8hGFHLrl2ZEvIiaTyVaUzVpwzM9DEtHrAuGLKUD0fZDsPSXfobUWguOEIn3K7CSmX0qiTfKbsuMPj+ZdbIdI0nBP/1/L/QrYSRvpsH9hniJwgVEGSr4YfOiI8XwIvkBGb124Fo9Ogckut5r/Gfl5TxJVkjB82dH/Su3LL3TZPU5aqOBi5+JXNPxgtaBx4kolaqliPNRfSZN46kWOiAJCq6YifaDVlV/qCGiLkziu8x7rxs+kOef2uF0/TWNPefsD3rjcMhHvFmvJJhtKzA+itEf/5Dkpy01zUlID1GO4C3VRQUSNn68ku1EQHYKpOOtJ9+HavBrgJAVkMDy6Ht/iOpbg4sXvnu7l3M4OdLBPH70EMKN1X3yqOkTc1e2A2rCle3GqMjMiV0TleE1ZuGCF7/MkfwMwJAKjkfZuUPnBqHYk2gUAFbKUj+C/jbMZiP7GvWWLQXExVes++Ct9c1iWx9r+2T2oCDwDTmz/yzICvo2XqDvreEoa1ipCnB3jO7FQbnTbK1W2GpUo0vBuZ2p5VtsS0LlxSwlfqv2W64tyo6jNrZnkcSrj6RSO3Ui8FEzg1sCRlJqlXuuajj968msV+yJv1h0Rt8adQ3yJLBplLWSOD1MdNlQ8IkrVx581+uL1fMF+1pUMLSHwFb8TJ/Nu31puFDV4yrs33nm6r0YDU/jRj7SH5kY7EuTKkNJURcdHnRU2B19rSu42sZ+bHtlr/UX+KHDxwLJHm9x19D+UEfLLdp2or1uQII9CsAFFroQGetMcIuJ4yhU0uA7aC5s4CtjsyYrycOuNgjFzMoDNTE8iS4EvneTudxoDSWzvf2aB9Io9AiiiokVdsK0QgI/VFUBDGXWx0iHCkzsaQjFD1w8H4FWrvUXm5Q6Vu1d4fZY2Ks91YUzXCSulB0DzptdsKnrM8kJk6AVCe2quld/19Fj5qtAINAu8m4qriyN4W8VzleJFjXHt2DAU5Qsvf8/hcB5T4AKBA96J,iv:hJ2Aatzkmn7PJ2lSofUavzOpKzngIb4O4b5XELlX/IE=,tag:ih4/U0gC4rIH64Ket8XZbQ==,type:str] + key: ENC[AES256_GCM,data:YlO+mRsaZhZRMQZxyZkuFbsYfhgWsKFspJLGt+QKZULdFp8bLvEhe7cbb8Lw5xXVARYj6aXPSKFpJtAlsgk3u3wSYb1jXaOWqGRyrZ3ZmMPi5p3QBY/QkmJ0aHVMnraSwkvdaFe/BIkaBvyG5u2kzZpnx/9ucxW5SXuwCZ1yoZBnbjsHAWw70S61jTWHXBDXHy+Qz6xJhrLIBtXvrja2Qzc6eMnPkcTFMh96Ysib7hKtZgCbbnKgnaErV0WQhYvSN15uKIvlqfNtrU0tmpf9WWq2DjwVXTW9zIABJZUUm38ef++JbC3JFia0mW3PM0Z7zemi/XlJE2ZCz+jI065mRMz2Az9o7l67wxcOoCQj0cTz1yvtfki2ciXnjArqH4rxLUsIQDn+83SKRMjYV0BZjw==,iv:Q1NastOzROvd8WwMJn2MJfwE8tvv6Hh6jWCBC73ZnUM=,tag:x3+hcFTUhzCvgeyOaOrzeA==,type:str] + k8s: + crt: ENC[AES256_GCM,data:lF7Wu/fnL6Wh48UNJSF09ofdPbqUvckkhJAOdeQUMbbVNpCe0EvMDS8FFny14/kXTNoyP7oaewEq2m8C3wK5F4rh+08E7T4XnD4TIKQaQf0h97XgEwDyEs4Dlp3rWiV5PUyAcQuTfsTwk9xltXTDAxgK0YjB8c7AKPtcG2ZAMQWeISJ1TH1uDoA6C7YexqTmTN2p55GTs7p0XMq+UGcesj4psp+TMAlSYDM9RIAoBy3uhcuChwkHKDzoG6QvwcvyFcQxVMLo8pIhpEpenkW1jP68WfE5Q5ZkZDpSrDD3Z28k4i9IRw4MKkih49zx87/pd8sibE+d51GZs149/+N06FaruiZOSSixHwgFB+kSvrqPygEYCgoVaQ036GYHUGlyR8iV0PHAVmkoprKJWWS9IWcUgznBpLUVrPy+wUYL3pl6qCcTXOYspJGSWVz6BQo5m1wTcJVERR5DRpWqlx6QAkp6zAoYwZr/qfBO3wIdvM4HSO4bkKU+8ntMEgzZsJRr9dSBClxa5T0GdPSTFdCBbDI3q4Xv9J8NjXB4Mh825Dj8YcK8JOxaj/KxolefTgIm+CMwKEB9c5nC6ka+NiqW+gl4oKMUhWqPgXXJI1jaEzKTfZk7L2/ue3wUKDidUjWFCXVQ8LSqLfPZ8n9sIvf2QZdobPsld7jgtJLxilB9XCkqm4anIYJz8T0lewZY0L4t3nDn46ddOshVIU1DiFA4ynvCUatqw2O+lcUfJglnf5rOAYTukhECqy0bu+8WQMR5tHyPoxyWTLMbaxHWXD/GZ3IDOHa7noMizXYCwNnVOwswWUW+vuMSorSLbSiMtDz+AfJsi6S4dTtMn8Q0fjzwpf+bZNHzSkzaeZrWHP4xQ418vM5Kk9QFrlyR9sNDJkSF+N7A9HaSXE09gRPDnB4BMWBhbDPKgUiZrTP6HEmVxjqBEaapOKTwg6ctp1Zha7ZoDQECfLvCD4tYg5IsWy9egca/0ixwE0CufOWfe9epMrOai0oEye6hJPjmZkYL8km3FB3OeXpok8aAf6/Jem24ou7Y3MI2v/1elwGVSQ==,iv:fi83QMzCG4qDfyjvNA4H0p9OVtIxl7mXjPivgNgi8Pw=,tag:Op5wFv0E1xcF2DOFAb2rwQ==,type:str] + key: ENC[AES256_GCM,data:ywbVpZwXePZ5dlR/BonVk+6Lvic8/INjwDwt3YCeQWPUsrbUxzi75Po+eLfI4McX5i8eFgo4kyCPvP4kySb3wSag5EbWBrkmTMDDHB1jf8WDDy6SXp4eVhgwkQ1Mje8slKbTeCCh4ya1Wxut060Htc0gjyKCvw96JTqJtmdwQam6qMl3Bj4ROTbnGJdInIvHH2HBva6GV3BRUbnQgj7eGkS62TJW04w7JQXpjxIWldnMn/asb3Tp9jhvWE9aevxHWowCrUbJrvHQuoKFkklPQJ3lNKz1VcLxcrTUnaEoxkDYtx9XPke1COBNEl1egFr2/9zYyu5rDE5/kallCNqIr6K5+4IbZFXS/Jdbgvjk9F4dWQhWG4rIa72j5YI4tWJu3j7twJuyYhISaIVyIV1l6g==,iv:Zh5G7IuDR8qoKwkqy+3xydSwAajkjmS0XINI+WmkMbA=,tag:rHGqTIYRw/micUcaGxM+Mw==,type:str] + k8saggregator: + crt: 
ENC[AES256_GCM,data:GTFBYENoyudEY6v/jE/G97fAL1seKOatgpjfuuLgUz9dfJ/4sQivGXutr4KPHtnMm8SBZvn/KKbtSCKGiwaBwEiMNPT+EfP3vFZKkT0ACDFk50UWr3wAywjTILGILuuz19cBqy9tsAXvnejzNLW/nfI9QzATLuQI6i01i+W4QIGKZ85PWVwVAb974aPZET5MXj4tjw3H0XTJ+CagKDmtQfsTGDyZcyIAOFHw+CRPVv04vn5QCszeCbsSO/O4tq4gp94KngF20koabE+MQxNUROOIipkG8rKpchJPxwMdleW3Joj3ov75NNWoFsOLG5qgaN0gOBISYElQWFifljzsIudqU5cgJldqo7jKpsoK7nVGV0VnrXXZZoLUnoMCVmTeIkZAB38bt/GFGvVzis+DiSos6vLcQu/v1D6HHNqVsl03dw2R2sqNfnfo3mCe1RkU88MTWaRQFDWvYAgAlFoSagEOmaAyJZQB9cQUflhFjahkhw1JXV+XwLeyB38qoDwwJO5SH/vsu9GbwOOP4w7L88q5Nwf/k2aR5A55z3xx+qAHCmAVsReVPwKzur+HumillD2UqolGh5CWBx1liWjzQl4ce6uC1Zm7NBwu6iPLgwolL2DiUfwotrnPne1Zg03Ir7GncATEN24n4mSUrZCYETXXaVbnoNvVqwzScVIbQE98MyaWq5rXthv3glZOu7Ib5/qlIwM6uzcfWbXSCVRYdap57hRPDJZGUWvpglyP5j+1KJA4dT4jfuL1BzrTOGgjGBU8rN6hl5zmV9fTNAPsr+C/e0UIpHSR1VNIhTjkDwpT015G9XmHX11dNZtXEs+R571+9N32scqfD7XE6Ch3yhIKLQXm96zH9RzgF5/AWgJf+2rs9OnYfUxyivd6h/A/tGFrGXxWJgbDMPwRipX4OtCFXWdyb9WE2gF+muywG+c+gBM+J+s8s5+g4QP3bz1k,iv:KTZ2mBy6ahOk14glugVRtyn9EmSkyppKugaAiGVYFOE=,tag:SHFvI8Rj6BR64I8tkBolaw==,type:str] + key: ENC[AES256_GCM,data:QKuk49ABxZu3/g+6GBTC+lr0GIcsqa5TfTcrAKV9LYG39U2BfdyuRk7HlW5M8LnFTwjy24/xN5DtLSpjcQ4RC3EGHUJ7rTZGrEXOrijK4qZuwKd+dx/5blotwdZiJamz8jQ6Rv9eFAU5TC5PtvJSbvaDbn/7hDKabnXEWU3O/GXmGFN7YfUZK8Yi69sg1YK5ESdoBOfRdjD47TO8KeboqRVZytsKvsrbwIHn4j5ZpM7pVavrpO5P6n6bMVcDl60Ep32kxGueaDFnopWc2LG/anBWir9vC+jnHzTeZCXwHxByhFSCqIiWEt/8/nEzGLVwKfjP+t2PRGPYCzO/U9Gr/CcJUR+pYT2t0iXE1QR33LSlzabbVkvOwE3Sea1vancn0pO7Qzb4B0Fod/fehwr2YA==,iv:u9gdKEek5d9l8dksd+ar+XkUFh6zZ/dilG6thlAhmXI=,tag:Hf+RJsLx8dsyFeoNzKUNPQ==,type:str] + k8sserviceaccount: + key: ENC[AES256_GCM,data:ZfYNoItPmosTriG1gBB6q8xXVCFHAPndHQei9L7VD2V+8zLGDuzTT+D2x7Cx0srx74kQ5W1+hRIL3ZtV6KkaUeZ8TwnEOYSV6TSXXZGXd/842AcSV6T+H6F9AWwJI6rCB1lxo2TiHLaEnkHFoHzosvdIZ4PBYn6jGeEUrSdWhFdnp4v/iUVq3AIr0KT8lVuX8fLSJV9QauPqi00xv6kYREHyz3hgUzO3VxTWkZAo1Cg7+Ge32wKQRPlD65iRtFNb301v5COj0dgfZiFwZ2gs6TRutXRibtpxQNnIA9s/Ge1T3qFEUY37/rGNUt15dOUQXcGBCwTT66zTV0jecNQuTyTcy0q/lNpxlWvmm0sITecRiI14EsV7Sh+YVs2YU6G3EMLHNxM3hyb+yNeanajwu74IqsGYQsT/xnKxC3fZx3TvFGOyWN7vhNzg1dbTiBFTkkVIKn+shyEirsgosVesPNfWleecdk4hUpnj7OlJKPb9MKVStJ5a6UuAnkVcwQqmuX4oAfZmygaD2ZBiFSIf8vNVYYrF+/8A/NCDM0sUPPFKUbgWs144JX8DlgmAtHLehcqMOtkyByl4x/w0uQNuQrCGCD6mCLhUi2ojNatuXca/xSphs0oleVj4bUkdhhya6R110T6zvQGxRBnjLiO55PpNjMPDHR698bQZn7jXNrt2zU07gE9Rve9gUDTofAhilb8qxofr31MmYQ/byDcxvSiiVX2bjdiaCrvlmJvNmmD5lSGcUT+1FwMwe00oViz8SDslsglXL9dKLILFaiBytjF1V5G9BO2oVk5Hu00eSOpFPFJgzUoEqgXhto3BDqYbewGpT+joMvCWJ171Vo/AI/1Y3tlHc67iYRzHKQiuhSMhx6cI4ijwIe1JRq3CDca5OshmMcR0znjseru+PPfgQpebtlL+jJtNjL0f/T5QhmB5jRfQl5gwKgFZ8Sk9BhFuOEDXdx+6ezW3tuwCP6ZKaOMjdEKyOTRijRocBq0j/1jY/o38q3d/IQ58UNoOkpZ7RCLHeQJGDrrpFQjfzVk7Buwzkd587sSu5FEoRuDuKTW0pPTW4LrUDv8UOWWikJCMVE+I7MPK6g7DbxHgSb1wBjiCQiphVdwcN0tBmkDFvE0SyIlBlHuR2f4PGpOvbG62nHR0A7aPN+kgaum4SE9+v37ttZKUrkF92B+wRbYtCWv8Uu19RFb2Blmtrpomn+a0/lccl67JQqLclqipMLuzC2Eob036Wra3zwHKqnwzibD8RRRcDkPWnLyM6McvSg1Ihs7HcEcMcqn+XR+KaHaSci8EZd6emJkZ+lM4qr8kNrUKehRyaSnKEkBsdntI8ZvvEBJQ6FT30WZLdAsANTU700v+ajuzWfxJTK4y8u0vKFG6Nd7+CijBWzUB8sqFfpnpVz5KWZEdLem+3+b90BtJlGrSvB9qbhhAVenERuCmmQDXP7GGwsjj1eqDTco9AS+AyyQaL9gqjtcY5A7NxP3nc+gele5ro27BItb9p5CspPDXX3G9v9JWtqjtqmHRZqhqUgmaiMwzCFyLP2dVA/UWm012tebq+f3lekUeAVP+9fr7e7edXIzlI8KKdllVrkZgEWW+0HBciNq+x4mtysxs8fz7bnLB7tNa8tbRC0MzSO/fU9TX/JgMJyCQo6VitUJwFo0FWmYjzVJjK9amxof4Gg4BaJC/UNARls7DtUQuYtmnY6IhDu6/aFMZIHfQUjOAEidjmP92t4PNuOfjPOmxAlY8fjcvP/a1DpTVAxfQwj3OhFQ2GmjU5ndIqu9jU7g+wl0RKIgo46Bp9jnhIVWK5htst9EGeYeRNLLwBzuPvhXE3b8beIgcdSnJ1mNRR4EsoidyKniIRRQAtvQGzomF9U/Dak9c2kB5xomim3UmIDxTEHlrNu7/KF2uWgwy
X1yNE70QKCYM6eciKnUFrMdSD/pg4ckJ1joW4cAKnUmftm9h8Lix1U4PEgoVAn50QW3RbXtzFd+QHlgpQ2GUU6+nzDIVvbcRcL2X4L+I20LHdfVTfKCrXiSYUkff27UvO5cJ31CSUqIVrZ/yM4TX2Y2jhDeRr1avx0XD88QhIsNzBIiPkXsepBz03a45xsXBZklUaSCpNa0X+P2dNxXmALYVGlyg9Nhv1H0zrY0dbmC1MEmAhS2UuITEk+3Yzn7vaaMbyK7FWJPAwQIN6BIiTJVs9MlTjSIACY29nvRyJjvgvABFBlmNtO+Bp2Seq8Xy+aakklwTuTHzdlWNjtQcsZ3hmYF2IhH4TSvJP2RVRYs008DwpfBMymuwRpV4v07ntoCF9XkzJ7eVKNxIwBXY8WkwA7NXWZLJTpwWsdYCoNNCVDjJMhqC8yBg9k2vmmWWuCX0NHpe+it/aQvFPq0oRaSouAgEX01XcqVRqIQ1kG1XCRGh+W4DDJZC71JF3zRMPKKfe7W9PBHj1nvU8IA/CqFKdKju+ivEV4Y9RF1sUOlwX1CrabDcfMYBpCpZLMmOjksKOAdqfj5gVWZ7YYUPPNM/vOu3b/5wYfNKg8pe0pOEqMQ5/fzBw+c3593qTx0U2ls7D4Q6W8cG+oMIN8Q0IUauJ5m4i7lg+k0YeKYPUhrceIBrLewCxIg66CtAtOLc7ENOyr5Q7xojWMUaiRcs9DotYFNXV7uQWf2foZN+vdTtrGTKWnf2t0tRIQS7Ms4c1hWT0PfdcdmEhzUf1FDM7VEW8fCGIKkovIOnoxKXgAAPkj5kTbtZHZqDBRiGxICOD4sYUC2f0PWPN61ZqBL6HYjm0/Mo1MEgEhYFtFN5RrNlImK0YZCVwW1w3rYuz8VTlEBSHxVemF2IcOQXt94FrQfto6We6I9mdNkRnfS0fEiiCWiSC4zg1CatSOb5xV/3mw/t2wmTpNxPUqO1iCCRXZLM75eaj8f2MPpTIGZkScuagw3fpVXd7Mv5PjiGLgaGLB00pYTjOfMmBLnoqW/Fk0aE67RtJkejENu2WlpexApOkP/xrvHbwiU7nq64ON0w4bnyf0d3lZoSVnjkbe6O5EItepZHRqvoKSd2cPWN0GbIfRr8nEb11uO35vluOkZDH1kfZL3E97rogm9RrxpBRYLaY2GvhvBiFG9XsKdQvTrQtc2feHki4dCYOmP3pTJD3w8qsT2jILw6Sr0UC8CTlGE2zx2nzFz/ykeu0VOaYA1a+CmyDCRW1BkXyu0NgkTFdXq1h8kPvT+CD4D5yUDILvVXeDlk5P3emMCfP5ArOrtgsWAMdj/Z8DekQ6ochodxSrj/C4nf9TJlrPGJFmg2jkGS/vxIDMGIDBjZPNmSRTmmafa/FKXcb3PECINeUvbQTgNdKiEGYE99FCs2fw7eLCgzfLlm36BK1D+31T357ephREsyWAgIHFgzy7C0RKZFtnqrUHuV/f/lSb7haO/lYhnojrcus9V/+xm4c0lJpa5/YOtlX0CsVl9Sewyjv4rVdBuEDeKj08aU/rtfXisMJicrEUh4e/WYJSnW5rAIpppZRHYgf9VPRKv5JG9VP+fIYaPOvjr2Gv76HllIUURP9woVdp0bt+V3+NS/NAYX7MnZg6fuUzZu6Mk75dLUdbkE1iiej16u88DcudrbU0vGn3mJJrMt+F0spkQJBK0err8LEQql+7EOC4aBimKUwxIEk3XRQgQWp1nFvgm/GnO8kIayIxv/ZWbPPm4ak2rRq/3F+J83PDQ+wqpGWz49NuA8UyK/5hR8wacmjDntKiN0mABetTU7CzlPkmSXdvE37NTJ12Tr75oHaTj2EHTY3AZ8mWgWf0v++6+ecdLJhE/1CJGaU0pZpe4z6Aa1eDmSeN258AvDOoUNTW+zYBXQ1x0/Sq9uoccZzs3D0YmwIJmdRh7awDadB7tv6HUUiBq05PDaK+iNtDmBnXUhk2K0DNQJBSI5bd4u/xKL3SIik1RNeE6MYkp+nyCy2kXJmf4lZmKQlj+kZRf5n7t5w1eEfxePT02gfoOHFS0GT3gCfxdISObJqZ7CgGautqbaOhvhnu2+jUDdclU45Vb/iU2NaEEyfM7ApGYYkXg6g9jC4cEUjFjrP1LIbKcyN0sKF2A7n7wW4nAw3qgDD2/YdzxM86ReN5L8X0CW7PbaUSes2WIdi/DWtcYT5iZF35MHZN+VwRnLbcJO8tFukf+sJPzHkd+NhUt100uSAmqEYAUODgknHYUWCLNPsl/yKm1YT/m+r39gv/iFp+KpIG9w62mNpbSeZwrav4IBHil12bR3Zm6kg2Ln/Hn27gKPV2geOV8GEXr1BOAdELg2+7qR5gq0GBNiYZ9XvA3f2Rv/gVEeKlfQrVbtFjtklWPHZix6NlOxD3xce0EmziuSTmx8dM9g7vx614IeYJkycEn+I7AEoa5G6LQEEV1sbwwBuvoA2DLqDcMg9denXbIoUj4YcR+ifz17rQdZ07hXgZf3WfuwWJmqh8f8Egvqd6z279nX5H5GfKJg4GXMIcANG9Kle4TNZJyL3xHTfKVrsu4svV+4HAtIdMlMm2S3DBqFZxcayuB2pXviZtjYOkNXFg3a7DyNrZ2p2i18QFaS3KG8sM9XugMwGPa0cTUMxeUwu+eCg7x0pT9WWArSboJvRnTyffrf6d9+wq8aYlfHa7Dn6LU5r6t9+Wyfiav4pOsgisp4+ON+DPTgnPLHVUFdhgSaiOsny0zEF0MFDyavTE/qIMSN2cuw5x042UiiNLy3i9yJ4LNyl1UnciPUNDwmC+LwNQUGOncLbnnrBQg5qBYyRi1H65ODl3gwSu93pexTHwwJ8p52ymaS84cgxnueVFswwtp4A3BaRYZko/c4hd8rYS/FQxK0Lt4BDu4bPb5ajuoiQosKQDqHQkdMGiNLFMq/fP9G8Ou62gP+Z+Vnyz8trZE5AxBya4mxnEECkTaUDGo7B9ApvV2vbz1rhL5v4jQz2rHsFsyxg/zA9unOWbGOJ427JC7IIEeaV1dZBN+dktZGEL/83R1Cz+dx5ox7mZDnGw5IMEqKou5CGgY65r+NVqwuU2aQj4pzdb/yRAyrY5exMCb9QzhZE4tDe8PdUg5THl7IhTIvGWFLjcXEll5CVNnD2BG6rC9PGGerowLHRkGkxQt4jxfZqHxZtDxLZJPR0gA8ZzPS6os/mpgLC8hfbrLaPrSPAGhOiyk+eRaUCWeBfyoYSzx/f9EHS87Q1twsvLe6oQXXk/5lSRt20bcIYOt5bcX/IGOxaJgWGZPG14fJSV7GESeA/tty9UOnQH4+yvbRCDxMh7AfJEcd7aqLK8hC7T1esNL/kL+dAVkx8WWoCzbVqBzfz811+MXgX49d/hTi+hoxtdhZG9ONO8T7yBZtx4YO/ExBTPA5SLehe86Mds48BwetKCD1TIr9Dzhf76kbYHRSTRU568wVj1kHQMmcESlDdOVu/Gbc0RisAHpeB0emLE5AgVvj7m1QIK3ubStYWnax0V0BP+zv5V3OaK1uXDkBes5p51bIjS5OYO/
EsvzaIAHQIC+sOho6PzK/4C4l2DDQmBOel8gCuzg7aHSe8QxQ0MVM0tF1W4w093k/iIKTPWjpq2BcQtiBLfokTUlcT1vFGCPAOnw6vCTzWK1d9z2Gc7IXNtQh/je+EerLgkELSeWdWIttHJZJDQ81e9w+JdqtpjYLOd/u6UdIG0eY8grBheOZQ/qaqdb21OesSN6Pre5jhPy8xxxRdeXbQs7hzTYlZNxlhAeU8hgabQxpRAKEv2k0PnaKfWal/LxVtX3RJRmeLfLmZzyG9BFExQXHG4fP7evmQpKoIQ==,iv:OZPoG6dZCPloHpFJGYqf4kgG1WtJw5opngvQ+UDfckA=,tag:czMZp26F+2lBzSOoektFYQ==,type:str] + os: + crt: ENC[AES256_GCM,data:gzgv2bzsHhQB7XwUxE2UY2tVdXHp2iBXNEMaqbA33Q1LI0CmwoZHwHg2qy80g94amHJ+fIQc3qACel1MYLI1hQOzvoOsmh+lHZ6otyn+ToHPslT2kBUdxTxZ5EQeNt0qexYI4a9oCf3a5miMHFY+C/e1EuS+ANOdRBDqzmVdzRlztWDEt+n/WnphIX+G9Z+3fMQ2bciEDkbah1UORZL+Y6POl+eJpxgbWaYNuwopNwt2T52k+Dz6oxHYcB8UaQQN8Zdi+0oATi/8ln+p9EXlslNvRDdn1/tWod7AjcknjK7dX0rRkPKAxaFiMweEhvTpOA7MnzacCikAqC0kLk8uQnik5VhC/QhoAMwtYwbvo7RhFinaDvFm/jxPd1jD+UcVhIWrZfIH1Rz1j6bT6mFAN1L0HVN+xBHTh3UvWN3wJWiav+1Dky+CKiJ7D2cTLJI+M/B43j63Sft0fxujLQWeUzypnPZyBoE5J8sN5iQ8iHzvL91lKs8KVXEJNs7vjBqOuw5XeRbhV6tW8qfFjzcp6HLGkFHgFeJCSrTKrdgiZKrhFtFVj0fZM0fCbPIm3JV5euuu5Op3jp7fSAuEkIy4KaaNQK1ed0TKlsuw8PsDhbBmDOHNSvvWAV78wSftC5GPcoEsyhCKK13PtTgxib5Wl2eq9GQkguNWNHzDYnNnTrPc36NI6zFe1z81EkA2SSsOK0sfVMamwovbaEy+6qvGQcQD5rQ/kQ2QVleEHx5HLFJq8lm+l1ib85eDVGxIeRLObWTutc0t27I0V2+m3z0HamIRC2Mvwy5Y3192Er5FQWciiweFXbVly+YOIm3ShG6/xUUleS4Zm337wzbB/b5diS96T3qGYSyfxvtBktAP2dvujLn1,iv:qXK3gTNEBwIdMVxvO66h7IQaF61DSHgn5/QhPgKClVY=,tag:f1pgGZOoAzxnPucCsG6dDg==,type:str] + key: ENC[AES256_GCM,data:FbVH7BHYdv16WUwFO6hzJbzdAj/c9GT/KNHhbtUfYk/L+JWPweusj6iL9mIYWqZXN2j3SpOD7wcPivCWo/BLP+fIiQZsv4+ZENugenT4qnh0jvmzJAs8uJR0D3O3JQK0q+HPmgW/dCZlVsHRNZqZNczebP4hGzfTjImaryIWfBAO3E3DoG0zXr07Qkmw5b9wj2JDsS3J+SkUzMUoUKg64/LxRBRo/lgmoygsk3SaCmdi02Ix,iv:WhfFxd9JVXYAq674QcobATaUzHjnUdPINxbAUVBUHss=,tag:OzSfTki4D5hZnf1aw7AuMg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB1MnZ2RGE1VGNwN1ZJbzBl + Mi9UTWNwOWpTQWMwT0JhMERqN1BXaDR1WVV3CnlhekJWMzkwZHZlemVPcVBGMXF3 + S1JEVDZhNFIvak9jMWxSZ1NKcnYxRXcKLS0tIFFqOFpISm5TMVhtZ2hwVERTK0U0 + RWpBMFhSYU9mRlRja0huaEp5cEMxTWsKaLI7bNPNZ7wX2ujpxfS3t7md1YD70G68 + uQ9CPLHh+sLkUyHZnbUz4UuYf0J6SikrUyeEvRdTSioyhndOhyH2HA== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2024-10-08T21:07:14Z" + mac: ENC[AES256_GCM,data:TGqe45X2rGjba1qJ/Qn6aZeIQ1L2plLVdH9F9bxLDZrakprnqFuFPnrTktkVwZ/I3hzdwlju98vFlG1VG2lbJQETnkrrBUmkoibapjLpPqvzlb1Vmz9j68Qc+YNw80cTQFHQRDv9BXP9JcvKxKYov9STE4k5rTmh+kAumkvfYSI=,iv:QHI2AsV1Wpg7dfWAy79cxCTvA7wzmqXyztApLchWUMg=,tag:Fsl0HKk/gnXaV34rIdSGsw==,type:str] + pgp: [] + unencrypted_suffix: _unencrypted + version: 3.9.1 diff --git a/kubernetes/main/flux/apps.yaml b/kubernetes/main/flux/apps.yaml new file mode 100644 index 000000000..59cb09fad --- /dev/null +++ b/kubernetes/main/flux/apps.yaml @@ -0,0 +1,41 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster-apps + namespace: flux-system +spec: + interval: 10m + path: ./kubernetes/main/apps + prune: true + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1 + kind: Kustomization + metadata: + name: not-used + spec: + decryption: + provider: sops + 
secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: Secret + name: cluster-secrets + target: + group: kustomize.toolkit.fluxcd.io + kind: Kustomization + labelSelector: substitution.flux.home.arpa/disabled notin (true) diff --git a/kubernetes/main/flux/config/cluster.yaml b/kubernetes/main/flux/config/cluster.yaml new file mode 100644 index 000000000..e9699ac4a --- /dev/null +++ b/kubernetes/main/flux/config/cluster.yaml @@ -0,0 +1,43 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/gitrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: home-kubernetes + namespace: flux-system +spec: + interval: 30m + url: ssh://git@github.com/Darkfella91/home-ops + ref: + branch: main + secretRef: + name: deploy-key + ignore: | + # exclude all + /* + # include flux directories + !/kubernetes/main +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: cluster + namespace: flux-system +spec: + interval: 30m + path: ./kubernetes/main/flux + prune: true + wait: false + sourceRef: + kind: GitRepository + name: home-kubernetes + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substituteFrom: + - kind: Secret + name: cluster-secrets + optional: false diff --git a/kubernetes/main/flux/config/flux.yaml b/kubernetes/main/flux/config/flux.yaml new file mode 100644 index 000000000..b0694a523 --- /dev/null +++ b/kubernetes/main/flux/config/flux.yaml @@ -0,0 +1,177 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1beta2.json +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: flux-manifests + namespace: flux-system +spec: + interval: 10m + url: oci://ghcr.io/fluxcd/flux-manifests + ref: + tag: v2.4.0 +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m + path: ./ + prune: true + wait: true + sourceRef: + kind: OCIRepository + name: flux-manifests + patches: + # Remove image automation and image reflector controllers + - patch: | + $patch: delete + apiVersion: apps/v1 + kind: Deployment + metadata: + name: all + target: + kind: Deployment + name: (image-automation-controller|image-reflector-controller) + # Remove the built-in network policies + - patch: | + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: all + target: + group: networking.k8s.io + kind: NetworkPolicy + # Increase the number of workers and limits + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#increase-the-number-of-workers-and-limits + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=10 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --requeue-dependency=5s + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: all + spec: + template: + spec: + containers: + - name: manager + resources: + limits: + memory: 2Gi + target: + kind: Deployment + name: (kustomize-controller|helm-controller|source-controller) + # 
Enable in-memory kustomize builds + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#enable-in-memory-kustomize-builds + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --concurrent=20 + - op: replace + path: /spec/template/spec/volumes/0 + value: + name: temp + emptyDir: + medium: Memory + target: + kind: Deployment + name: kustomize-controller + # Enable Helm repositories caching + # Ref: https://fluxcd.io/flux/installation/configuration/vertical-scaling/#enable-helm-repositories-caching + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-max-size=10 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-ttl=60m + - op: add + path: /spec/template/spec/containers/0/args/- + value: --helm-cache-purge-interval=5m + target: + kind: Deployment + name: source-controller + # Flux near OOM detection for Helm + # Ref: https://fluxcd.io/flux/installation/configuration/helm-oom-detection/ + - patch: | + - op: add + path: /spec/template/spec/containers/0/args/- + value: --feature-gates=OOMWatch=true + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-memory-threshold=95 + - op: add + path: /spec/template/spec/containers/0/args/- + value: --oom-watch-interval=500ms + target: + kind: Deployment + name: helm-controller + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: kustomize-controller + spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/fluxcd/kustomize-controller:v1.4.0@sha256:e3b0cf847e9cdf47b19af0fbcfe22786b80b598e0caeea8b6d2a5f9c26a48a24 + target: + kind: Deployment + name: kustomize-controller + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: helm-controller + spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/fluxcd/helm-controller:v1.1.0@sha256:4c75ca6c24ceb1f1bd7e935d9287a93e4f925c512f206763ec5a47de3ef3ff48 + target: + kind: Deployment + name: helm-controller + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/fluxcd/source-controller:v1.4.1@sha256:3c5f0f022f990ffc0daf00e5b199548fc0fa6e7119e972318f0267081a332963 + target: + kind: Deployment + name: source-controller + - patch: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: notification-controller + spec: + template: + spec: + containers: + - name: manager + image: ghcr.io/fluxcd/notification-controller:v1.4.0@sha256:425309a159b15e07f7d97622effc79bc432a37ed55289dd465d37fa217a92a7d + target: + kind: Deployment + name: notification-controller diff --git a/kubernetes/main/flux/config/kustomization.yaml b/kubernetes/main/flux/config/kustomization.yaml new file mode 100644 index 000000000..2ff3c784d --- /dev/null +++ b/kubernetes/main/flux/config/kustomization.yaml @@ -0,0 +1,7 @@ +--- +# yaml-language-server: $schema=https://json.schemastore.org/kustomization +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./cluster.yaml diff --git a/kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml b/kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml new file mode 100644 index 000000000..54fa67be8 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/actions-runner-controller.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: 
$schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: actions-runner-controller + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/actions/actions-runner-controller-charts diff --git a/kubernetes/main/flux/repositories/helm/authentik.yaml b/kubernetes/main/flux/repositories/helm/authentik.yaml new file mode 100644 index 000000000..6c465b823 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/authentik.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: authentik + namespace: flux-system +spec: + interval: 2h + url: https://charts.goauthentik.io diff --git a/kubernetes/main/flux/repositories/helm/backube.yaml b/kubernetes/main/flux/repositories/helm/backube.yaml new file mode 100644 index 000000000..4ba0742ca --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/backube.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: backube + namespace: flux-system +spec: + interval: 2h + url: https://backube.github.io/helm-charts/ diff --git a/kubernetes/main/flux/repositories/helm/bitnami.yaml b/kubernetes/main/flux/repositories/helm/bitnami.yaml new file mode 100644 index 000000000..9f84188c5 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/bitnami.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: bitnami + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://registry-1.docker.io/bitnamicharts diff --git a/kubernetes/main/flux/repositories/helm/bjw-s.yaml b/kubernetes/main/flux/repositories/helm/bjw-s.yaml new file mode 100644 index 000000000..c32ccd8de --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/bjw-s.yaml @@ -0,0 +1,11 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: bjw-s + namespace: flux-system +spec: + type: oci + interval: 5m + url: oci://ghcr.io/bjw-s/helm diff --git a/kubernetes/main/flux/repositories/helm/cilium.yaml b/kubernetes/main/flux/repositories/helm/cilium.yaml new file mode 100644 index 000000000..2cd7146d9 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/cilium.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cilium + namespace: flux-system +spec: + interval: 2h + url: https://helm.cilium.io diff --git a/kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml b/kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml new file mode 100644 index 000000000..4b2f0e615 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/cloudnative-pg.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: 
source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: cloudnative-pg + namespace: flux-system +spec: + interval: 2h + url: https://cloudnative-pg.github.io/charts diff --git a/kubernetes/main/flux/repositories/helm/coredns.yaml b/kubernetes/main/flux/repositories/helm/coredns.yaml new file mode 100644 index 000000000..ed0bb65a9 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/coredns.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: coredns + namespace: flux-system +spec: + interval: 2h + url: https://coredns.github.io/helm diff --git a/kubernetes/main/flux/repositories/helm/crowdsec.yaml b/kubernetes/main/flux/repositories/helm/crowdsec.yaml new file mode 100644 index 000000000..ff698010b --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/crowdsec.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: crowdsec + namespace: flux-system +spec: + interval: 2h + url: https://crowdsecurity.github.io/helm-charts diff --git a/kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml b/kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml new file mode 100644 index 000000000..869fce395 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/csi-driver-nfs.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: csi-driver-nfs + namespace: flux-system +spec: + interval: 2h + url: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts diff --git a/kubernetes/main/flux/repositories/helm/descheduler.yaml b/kubernetes/main/flux/repositories/helm/descheduler.yaml new file mode 100644 index 000000000..147045cd2 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/descheduler.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: descheduler + namespace: flux-system +spec: + interval: 2h + url: https://kubernetes-sigs.github.io/descheduler diff --git a/kubernetes/main/flux/repositories/helm/emberstack.yaml b/kubernetes/main/flux/repositories/helm/emberstack.yaml new file mode 100644 index 000000000..9ca4b3149 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/emberstack.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: emberstack + namespace: flux-system +spec: + interval: 24h + url: https://emberstack.github.io/helm-charts diff --git a/kubernetes/main/flux/repositories/helm/external-dns.yaml b/kubernetes/main/flux/repositories/helm/external-dns.yaml new file mode 100644 index 000000000..2392dac23 --- /dev/null +++ b/kubernetes/main/flux/repositories/helm/external-dns.yaml @@ -0,0 +1,10 @@ +--- +# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository 
diff --git a/kubernetes/main/flux/repositories/helm/external-secrets.yaml b/kubernetes/main/flux/repositories/helm/external-secrets.yaml
new file mode 100644
index 000000000..2acd768af
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/external-secrets.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: external-secrets
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://charts.external-secrets.io
diff --git a/kubernetes/main/flux/repositories/helm/grafana.yaml b/kubernetes/main/flux/repositories/helm/grafana.yaml
new file mode 100644
index 000000000..eb1a6fb0c
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/grafana.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: grafana
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://grafana.github.io/helm-charts
diff --git a/kubernetes/main/flux/repositories/helm/ingress-nginx.yaml b/kubernetes/main/flux/repositories/helm/ingress-nginx.yaml
new file mode 100644
index 000000000..8e107adc6
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/ingress-nginx.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: ingress-nginx
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://kubernetes.github.io/ingress-nginx
diff --git a/kubernetes/main/flux/repositories/helm/jetstack.yaml b/kubernetes/main/flux/repositories/helm/jetstack.yaml
new file mode 100644
index 000000000..4bc09d02a
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/jetstack.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: jetstack
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://charts.jetstack.io/
diff --git a/kubernetes/main/flux/repositories/helm/kustomization.yaml b/kubernetes/main/flux/repositories/helm/kustomization.yaml
new file mode 100644
index 000000000..b09a93a77
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/kustomization.yaml
@@ -0,0 +1,35 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./actions-runner-controller.yaml
+  - ./authentik.yaml
+  - ./backube.yaml
+  - ./bitnami.yaml
+  - ./bjw-s.yaml
+  - ./cilium.yaml
+  - ./cloudnative-pg.yaml
+  - ./crowdsec.yaml
+  - ./coredns.yaml
+  - ./csi-driver-nfs.yaml
+  - ./descheduler.yaml
+  - ./emberstack.yaml
+  - ./external-dns.yaml
+  - ./external-secrets.yaml
+  - ./vaultwarden.yaml
+  - ./grafana.yaml
+  - ./jetstack.yaml
+  - ./kyverno.yaml
+  - ./metrics-server.yaml
+  - ./node-feature-discovery.yaml
+  - ./piraeus.yaml
+  - ./postfinance.yaml
+  - ./prometheus-community.yaml
+  - ./stakater.yaml
+  - ./topolvm.yaml
+  - ./truecharts.yaml
+  - ./nvidia-device-plugin.yaml
+  - ./ingress-nginx.yaml
+  - ./vault.yaml
+  - ./oauth2-proxy.yaml
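Note that this resource list is not alphabetized (vaultwarden, nvidia-device-plugin, ingress-nginx, vault, and oauth2-proxy sit out of order); Kustomize is indifferent to ordering, but every entry must point at an existing file. A quick local sanity check that the list and the directory agree (usage example):

kustomize build kubernetes/main/flux/repositories/helm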
diff --git a/kubernetes/main/flux/repositories/helm/kyverno.yaml b/kubernetes/main/flux/repositories/helm/kyverno.yaml
new file mode 100644
index 000000000..b86efb0a7
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/kyverno.yaml
@@ -0,0 +1,11 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: kyverno
+  namespace: flux-system
+spec:
+  type: oci
+  interval: 5m
+  url: oci://ghcr.io/kyverno/charts
diff --git a/kubernetes/main/flux/repositories/helm/metrics-server.yaml b/kubernetes/main/flux/repositories/helm/metrics-server.yaml
new file mode 100644
index 000000000..5b2d20f03
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/metrics-server.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: metrics-server
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://kubernetes-sigs.github.io/metrics-server
diff --git a/kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml b/kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml
new file mode 100644
index 000000000..5e45d5a82
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/node-feature-discovery.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: node-feature-discovery
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://kubernetes-sigs.github.io/node-feature-discovery/charts
diff --git a/kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml b/kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml
new file mode 100644
index 000000000..9d6501049
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/nvidia-device-plugin.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: nvdp
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://nvidia.github.io/k8s-device-plugin
diff --git a/kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml b/kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml
new file mode 100644
index 000000000..807fe58d2
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/oauth2-proxy.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: oauth2-proxy
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://oauth2-proxy.github.io/manifests
diff --git a/kubernetes/main/flux/repositories/helm/piraeus.yaml b/kubernetes/main/flux/repositories/helm/piraeus.yaml
new file mode 100644
index 000000000..4fe31ddb0
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/piraeus.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: piraeus
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://piraeus.io/helm-charts/
diff --git a/kubernetes/main/flux/repositories/helm/postfinance.yaml b/kubernetes/main/flux/repositories/helm/postfinance.yaml
new file mode 100644
index 000000000..015568bfc
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/postfinance.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: postfinance
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://postfinance.github.io/kubelet-csr-approver
diff --git a/kubernetes/main/flux/repositories/helm/prometheus-community.yaml b/kubernetes/main/flux/repositories/helm/prometheus-community.yaml
new file mode 100644
index 000000000..78c4f0c0f
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/prometheus-community.yaml
@@ -0,0 +1,11 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: prometheus-community
+  namespace: flux-system
+spec:
+  type: oci
+  interval: 5m
+  url: oci://ghcr.io/prometheus-community/charts
diff --git a/kubernetes/main/flux/repositories/helm/stakater.yaml b/kubernetes/main/flux/repositories/helm/stakater.yaml
new file mode 100644
index 000000000..838185d06
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/stakater.yaml
@@ -0,0 +1,11 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: stakater
+  namespace: flux-system
+spec:
+  type: oci
+  interval: 5m
+  url: oci://ghcr.io/stakater/charts
diff --git a/kubernetes/main/flux/repositories/helm/topolvm.yaml b/kubernetes/main/flux/repositories/helm/topolvm.yaml
new file mode 100644
index 000000000..3b438b609
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/topolvm.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: topolvm
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://topolvm.github.io/topolvm
diff --git a/kubernetes/main/flux/repositories/helm/truecharts.yaml b/kubernetes/main/flux/repositories/helm/truecharts.yaml
new file mode 100644
index 000000000..b9f1d39b1
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/truecharts.yaml
@@ -0,0 +1,11 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: truecharts
+  namespace: flux-system
+spec:
+  type: oci
+  interval: 5m
+  url: oci://tccr.io/truecharts
diff --git a/kubernetes/main/flux/repositories/helm/vault.yaml b/kubernetes/main/flux/repositories/helm/vault.yaml
new file mode 100644
index 000000000..8ee1755ec
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/vault.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: vault
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://helm.releases.hashicorp.com
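Once these manifests are applied, the reconciliation state of every repository above (including whether the OCI registries and HTTP chart indexes are reachable) can be inspected with the flux CLI (usage example):

flux get sources helm -n flux-system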
diff --git a/kubernetes/main/flux/repositories/helm/vaultwarden.yaml b/kubernetes/main/flux/repositories/helm/vaultwarden.yaml
new file mode 100644
index 000000000..59f5a916a
--- /dev/null
+++ b/kubernetes/main/flux/repositories/helm/vaultwarden.yaml
@@ -0,0 +1,10 @@
+---
+# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/helmrepository_v1.json
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+  name: vaultwarden
+  namespace: flux-system
+spec:
+  interval: 2h
+  url: https://guerzon.github.io/vaultwarden
diff --git a/kubernetes/main/flux/repositories/kustomization.yaml b/kubernetes/main/flux/repositories/kustomization.yaml
new file mode 100644
index 000000000..d6b26ce53
--- /dev/null
+++ b/kubernetes/main/flux/repositories/kustomization.yaml
@@ -0,0 +1,8 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  # - ./git
+  - ./helm
+  # - ./oci
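The cluster-secrets file that follows is encrypted with SOPS and age; per its encrypted_regex, only the values under stringData are ciphertext, so the manifest structure stays readable in diffs. Flux can only apply it if the Kustomization that layers it in enables decryption. A minimal sketch, assuming the age private key sits in a sops-age Secret as is conventional (the actual bootstrap wiring lives elsewhere in this PR; the name and path here are illustrative):

apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-vars  # hypothetical name
  namespace: flux-system
spec:
  interval: 30m
  path: ./kubernetes/main/flux/vars
  prune: false
  sourceRef:
    kind: GitRepository
    name: flux-system
  decryption:
    provider: sops
    secretRef:
      name: sops-age  # assumed Secret holding the age private key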
diff --git a/kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml b/kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml
new file mode 100644
index 000000000..2e0ddf910
--- /dev/null
+++ b/kubernetes/main/flux/vars/cluster-secrets.secret.sops.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cluster-secrets
+  namespace: flux-system
+stringData:
+  PUBLIC_DOMAIN: ENC[AES256_GCM,data:KeiBfsOYbAilanRFWTniJY8iApE=,iv:6sdpiOX7gZODMfv0vwO9HlROX7vILjbvNkYofNQKzjE=,tag:dCJkFwrof79py5fJ1yLSWg==,type:str]
+  EAB_KEY_ID: ENC[AES256_GCM,data:ndLcmUj76gb/PQ65+szPJkLiZf8b2Q==,iv:E6vKcUoVZaCdBaajCmaEE0ez+4MKSfh6xe6W9Iy1rts=,tag:LNlGqNVeiDBqBUaGEOiAvg==,type:str]
+  S3URL: ENC[AES256_GCM,data:4eXYOQuo2tDd+kAXZfg2XNUltBjM3OfEX7CUTwaz9mqr6UHSnVFFl2nxwXaE92Df6TVOe66w2780s0tcHmeVM81pBi0=,iv:fgwPt4wdCtL0NP8Xuz+pFRrlCEBzQ2uN0HNPx96dS5c=,tag:6ZQTrqsJ6Mm89uQfm8Y/VQ==,type:str]
+  CLUSTER_CLOUDFLARE_TUNNEL_ID: ENC[AES256_GCM,data:PwtYbcCui5UsC+rimZC5QNgwwZ+LZDQHjV4o+ARrWe53WIZf,iv:GVqwLq/4SR3sFj/0qRZ5SYt31j8ezGaeJsj6u6kQQqk=,tag:oBreoO25bpcGWcrImuugAg==,type:str]
+  AD_REALM: ENC[AES256_GCM,data:Bp+HSFGmnbVcxApev3YqKQiZFfY=,iv:AIFfzdF3O4n1WYxlwXCU64+VVKYGis6t4OQdYHYuHo4=,tag:63QA8Lmd14zlFZsA8ljwLg==,type:str]
+  KERBEROS_PASSWORD: ENC[AES256_GCM,data:lZZho8xEGfnVSag50A==,iv:JmrrZymVwUiPQueOG3n9cmd5smN4B2fsojOwIpxVY60=,tag:V7WAqaEZWCFdA8DNZB1c8Q==,type:str]
+  S3PREFIX: ENC[AES256_GCM,data:4WpmyuPVslvk246t,iv:QnZEv0J8vz9FBwdC4kV+wczCFFowNKpvTh/12v+Z8B8=,tag:yVLCGFqEsssvM/l9XPS9Pw==,type:str]
+  AWS_ACCESS_KEY: ENC[AES256_GCM,data:mPJ9xbM54GP9BkhOz/O7qaPsFk0=,iv:7SQQ2S89d7c0lVoxQP0JvbmpxELA8DXSYfCxaLBDF4I=,tag:tLQ/IVx6Dxuuq+K+I1vKhw==,type:str]
+  AWS_SECRET_KEY: ENC[AES256_GCM,data:+acohLCugLGUY6iMpS7dVjMbMag4teGImMfC35MlQIvvpmY1HdhUqw==,iv:JzSTxol9vwjDbnihICmD5pkDazoX3GGdnNnCJzvaMdo=,tag:vaZvofDsjoPGE/G+1XPZ7Q==,type:str]
+  AWS_KEY_ID: ENC[AES256_GCM,data:hLSFvhsaOzZF/2kEMKwD7bsk5QZTy2VAdDZcgRGDHjGjkQJFdVjsAROCV8T9pZ6ZuraCOjsGrBfq2/ZQ3+vYDOlKrTWBZh5DtlI4,iv:eDBs/XH6km+nOLuQ5ct1CnBPyObzgG9AA7zhsjcrES4=,tag:cW+ANHpGEbObt5wiPzIwJw==,type:str]
+  TURNSTILE_SITE_KEY_AUTHENTIK: ENC[AES256_GCM,data:q72oogbyphTZOni/LU+5mlGh+QXeXPCl,iv:LaoZr8nB0+qDfRoStjHRnKj+O7h9LCO7Rju4lCcqJwg=,tag:Vk9lAS6UAVteD61Wdbv2vg==,type:str]
+  TURNSTILE_SECRET_KEY_AUTHENTIK: ENC[AES256_GCM,data:oVTV7dW2a4wCSJMzeEKlBsxqi4W2kIkajXnYRGxdzoKLvcg=,iv:c19hiUYFWyxRTWP2v9oJwoY8a9Oolv2S5c+sjVNEAlo=,tag:dQzZHTfuFiLNjvMjtMMp9A==,type:str]
+sops:
+  kms: []
+  gcp_kms: []
+  azure_kv: []
+  hc_vault: []
+  age:
+    - recipient: age1frhtpr8u3u99pvcuq5mjevxdq9agjfpkd8fjtnpj9qymzh5v845q53f37d
+      enc: |
+        -----BEGIN AGE ENCRYPTED FILE-----
+        YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5T0RNbStKa0ttS3RiUk03
+        WStycVN4dmhvUDBUT1o2a1R6Qnd5dkVPVFQ0ClozTS82RmlCc2o4TEQ2UGRoOGp3
+        TDIxeGZHNSt1VHpGVkMweXA0RW5nY2sKLS0tIFJMeGMvWVdyRkpaVlMwa3ZBQis4
+        QW1lVW5BaVpDR1JReEI2L3k1WGxwRW8KcjNPmPf05RKize92JHHw3qcWT3j5H7gE
+        089qhWDhyldAZJAjJVK+6MJvFQj3JeCHHyAgfFsnhSWYBBoSCU48IQ==
+        -----END AGE ENCRYPTED FILE-----
+  lastmodified: "2024-11-05T04:16:20Z"
+  mac: ENC[AES256_GCM,data:19oJe5/4AjUxB1IocOWbdHvAIwaYbxH8/w58ZESyXfrlVKcMDcSYaiV+j8sI3T6zwCYebhRkmEg+HN0p1ZW4YuWOs81x7PD8EvEIyZF27P+dSu/h/wjDw1B6IQFAHjSZN4qiejpJ4BOkRPjzCIB1pnyW0l8YHBVRiAkWNM2PhIo=,iv:SRHk3lQ5i5BHFSXBG7jcXwB6yEdswE7mIZtLrs5qiCI=,tag:DOyKe9La5sIyacv9h8YAnQ==,type:str]
+  pgp: []
+  encrypted_regex: ^(data|stringData)$
+  mac_only_encrypted: true
+  version: 3.9.1
diff --git a/kubernetes/main/templates/gatus/external/configmap.yaml b/kubernetes/main/templates/gatus/external/configmap.yaml
new file mode 100644
index 000000000..cbe59638a
--- /dev/null
+++ b/kubernetes/main/templates/gatus/external/configmap.yaml
@@ -0,0 +1,20 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "${APP}-gatus-ep"
+  labels:
+    gatus.io/enabled: "true"
+data:
+  config.yaml: |
+    endpoints:
+      - name: "${APP}"
+        group: external
+        url: "https://${GATUS_SUBDOMAIN:-${APP}}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}"
+        interval: 10m
+        client:
+          dns-resolver: tcp://1.1.1.1:53
+        conditions:
+          - "[STATUS] == ${GATUS_STATUS:-200}"
+        alerts:
+          - type: pushover
diff --git a/kubernetes/main/templates/gatus/external/kustomization.yaml b/kubernetes/main/templates/gatus/external/kustomization.yaml
new file mode 100644
index 000000000..e09060b99
--- /dev/null
+++ b/kubernetes/main/templates/gatus/external/kustomization.yaml
@@ -0,0 +1,6 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./configmap.yaml
diff --git a/kubernetes/main/templates/gatus/guarded/configmap.yaml b/kubernetes/main/templates/gatus/guarded/configmap.yaml
new file mode 100644
index 000000000..70c106e84
--- /dev/null
+++ b/kubernetes/main/templates/gatus/guarded/configmap.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: "${APP}-gatus-ep"
+  labels:
+    gatus.io/enabled: "true"
+data:
+  config.yaml: |
+    endpoints:
+      - name: "${APP}"
+        group: guarded
+        url: "https://${GATUS_SUBDOMAIN:-${APP}}.${PUBLIC_DOMAIN}${GATUS_PATH:-/}"
+        interval: 10m
+        ui:
+          hide-hostname: true
+          hide-url: true
+        client:
+          dns-resolver: tcp://172.17.0.10:53
+        conditions:
+          - "[STATUS] == 200"
+          - "[BODY].status == OK"
+        alerts:
+          - type: pushover
diff --git a/kubernetes/main/templates/gatus/guarded/kustomization.yaml b/kubernetes/main/templates/gatus/guarded/kustomization.yaml
new file mode 100644
index 000000000..e09060b99
--- /dev/null
+++ b/kubernetes/main/templates/gatus/guarded/kustomization.yaml
@@ -0,0 +1,6 @@
+---
+# yaml-language-server: $schema=https://json.schemastore.org/kustomization
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - ./configmap.yaml
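The two Gatus templates above are not complete manifests on their own: ${APP}, ${GATUS_SUBDOMAIN}, ${GATUS_PATH}, and ${GATUS_STATUS} rely on Flux's post-build variable substitution (with ${VAR:-default} fallbacks), and ${PUBLIC_DOMAIN} is expected to come from the cluster-secrets Secret defined earlier. A hedged sketch of how an app's Flux Kustomization would wire this together (all names and paths are illustrative, not taken from this PR):

apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: example-app  # hypothetical app
  namespace: flux-system
spec:
  interval: 30m
  path: ./kubernetes/main/apps/default/example-app/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  postBuild:
    substitute:
      APP: example-app
      GATUS_SUBDOMAIN: status  # overrides the ${APP} fallback in the template URL
    substituteFrom:
      - kind: Secret
        name: cluster-secrets  # supplies PUBLIC_DOMAIN and the other cluster-wide vars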