From 0709f1168688770217a0bae60b0951f398b5e4de Mon Sep 17 00:00:00 2001 From: nikchern Date: Mon, 11 Apr 2022 20:55:33 -0700 Subject: [PATCH] BET-4793: libvirt simplified yaml support. (#17) * Adding local simplified yaml examples. * BET-4793: libvirt simplified yaml support. * BET-4793: Edge, Edge-vsphere, eks, tke simplified yaml support. * Adding local simplified yaml examples. --- .../config/cluster/cluster-libvirt-dev.yaml | 54 +- .../config/profile/profile-addon-1.yaml | 2 +- .../profile/profile-libvirt-full-gpu.yaml | 24 +- .../config/profile/profile-system.yaml | 2 +- examples/libvirt/mod_spectro_org.tf | 37 +- .../local-simplified-yaml/admin/README.md | 6 + .../config/profile/profile-bm-infra.yaml | 613 +++++++++ .../config/profile/profile-ehl-apps.yaml.x | 277 ++++ .../config/profile/profile-ehl-core.yaml.x | 55 + .../config/profile/profile-spectro-core.yaml | 50 + .../profile-vsphere-with-creds-infra.yaml | 206 +++ .../profile/profile-without-creds-infra.yaml | 617 +++++++++ .../config/project/project-hospital-200.yaml | 2 + .../config/project/project-hospital-201.yaml | 2 + .../config/project/project-hospital-202.yaml | 2 + .../admin/gitlab_project.tf | 62 + examples/local-simplified-yaml/admin/main.tf | 55 + .../admin/terraform.template.tfvars | 5 + .../project-bm-200/README.md | 1 + .../appliance/appliance-hospital-200.yaml | 1 + .../config/cluster/cluster-hospital-200.yaml | 50 + .../config/profile/profile-system.yaml | 1152 +++++++++++++++++ .../project-bm-200/main.tf | 71 + .../project-bm-200/terraform.template.tfvars | 4 + .../project-vsphere-with-creds-201/README.md | 1 + .../appliance/appliance-hospital-201.yaml | 1 + .../config/cluster/cluster-hospital-201.yaml | 44 + .../config/profile/profile-system.yaml | 751 +++++++++++ .../project-vsphere-with-creds-201/main.tf | 72 ++ .../terraform.template.tfvars | 4 + .../README.md | 1 + .../appliance/appliance-hospital-202.yaml | 1 + .../config/cluster/cluster-hospital-202.yaml | 26 + .../config/profile/profile-system.yaml | 321 +++++ .../project-vsphere-without-creds-202/main.tf | 72 ++ .../terraform.template.tfvars | 4 + main.tf | 2 +- spectro-cluster-edge-vsphere.tf | 46 + spectro-cluster-edge.tf | 46 + spectro-cluster-eks.tf | 46 + spectro-cluster-libvirt.tf | 47 + spectro-cluster-tke.tf | 48 +- 42 files changed, 4847 insertions(+), 36 deletions(-) create mode 100644 examples/local-simplified-yaml/admin/README.md create mode 100644 examples/local-simplified-yaml/admin/config/profile/profile-bm-infra.yaml create mode 100644 examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x create mode 100644 examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x create mode 100644 examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml create mode 100644 examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml create mode 100644 examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml create mode 100644 examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml create mode 100644 examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml create mode 100644 examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml create mode 100644 examples/local-simplified-yaml/admin/gitlab_project.tf create mode 100644 examples/local-simplified-yaml/admin/main.tf create mode 100644 examples/local-simplified-yaml/admin/terraform.template.tfvars create mode 100644 
examples/local-simplified-yaml/project-bm-200/README.md create mode 100644 examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml create mode 100644 examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml create mode 100644 examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml create mode 100644 examples/local-simplified-yaml/project-bm-200/main.tf create mode 100644 examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars create mode 100644 examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md create mode 100644 examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml create mode 100644 examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml create mode 100644 examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml create mode 100644 examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf create mode 100644 examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars create mode 100644 examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md create mode 100644 examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml create mode 100644 examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml create mode 100644 examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml create mode 100644 examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf create mode 100644 examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars diff --git a/examples/libvirt/config/cluster/cluster-libvirt-dev.yaml b/examples/libvirt/config/cluster/cluster-libvirt-dev.yaml index f23efee..376b182 100644 --- a/examples/libvirt/config/cluster/cluster-libvirt-dev.yaml +++ b/examples/libvirt/config/cluster/cluster-libvirt-dev.yaml @@ -5,12 +5,45 @@ profiles: infra: name: libvirt-gpu-full system: - name: system-profile + name: system-profile-simyam-libvirt addons: - - name: addon-profile-1 + - name: addon-profile-simyam-libvirt cloud_config: ssh_key: spectro2022 vip: 192.168.100.15 +cluster_rbac_binding: + - type: "ClusterRoleBinding" + role: + kind: "ClusterRole" + name: "testRole3" + subjects: + - type: "User" + name: "testRoleUser3" + - type: "Group" + name: "testRoleGroup3" + - type: "ServiceAccount" + name: "testrolesubject3" + namespace: "testrolenamespace" + - type: "RoleBinding" + namespace: "test5ns" + role: + kind: "Role" + name: "testRoleFromNS3" + subjects: + - type: "User" + name: "testUserRoleFromNS3" + - type: "Group" + name: "testGroupFromNS3" + - type: "ServiceAccount" + name: "testrolesubject3" + namespace: "testrolenamespace" + +namespaces: + - name: "test5ns" + resource_allocation: + cpu_cores: "2" + memory_MiB: "2048" + node_groups: - name: master-pool control_plane: true @@ -24,7 +57,7 @@ node_groups: - size_in_gb: 10 managed: false placements: - - appliance: "libvirt-nik15-mar-21" + - appliance: "libvirt-nik-mar-21" network_type: "bridge" network_names: "br0" network: "br" @@ -33,7 +66,7 @@ node_groups: data_storage_pool: "ehl_data" - name: worker-pool - count: 3 + count: 1 disk_size_gb: 60 memory_mb: 8192 cpu: 4 @@ -44,7 +77,7 @@ node_groups: - size_in_gb: 10 managed: true placements: - - 
appliance: "libvirt-nik15-mar-21" + - appliance: "libvirt-nik-mar-21" network_type: "bridge" network_names: "br0" network: "br" @@ -52,6 +85,17 @@ node_groups: target_storage_pool: "ehl_images" data_storage_pool: "ehl_data" + additional_labels: + addlabel: "addlabelval1" + + taints: + - key: "taintkey1" + value: "taintvalue1" + effect: "PreferNoSchedule" + - key: "taintkey2" + value: "taintvalue2" + effect: "NoSchedule" + scan_policy: configuration_scan_schedule: "0 0 * * SUN" penetration_scan_schedule: "0 0 * * SUN" diff --git a/examples/libvirt/config/profile/profile-addon-1.yaml b/examples/libvirt/config/profile/profile-addon-1.yaml index 89c5863..eb527fd 100644 --- a/examples/libvirt/config/profile/profile-addon-1.yaml +++ b/examples/libvirt/config/profile/profile-addon-1.yaml @@ -1,4 +1,4 @@ -name: addon-profile-1 +name: addon-profile-simyam-libvirt description: addon-profile-1 type: add-on cloudType: all diff --git a/examples/libvirt/config/profile/profile-libvirt-full-gpu.yaml b/examples/libvirt/config/profile/profile-libvirt-full-gpu.yaml index 41e8be3..b0b116e 100644 --- a/examples/libvirt/config/profile/profile-libvirt-full-gpu.yaml +++ b/examples/libvirt/config/profile/profile-libvirt-full-gpu.yaml @@ -152,10 +152,10 @@ packs: pack: content: images: - - gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0 - - gcr.io/spectro-images-public/calico/node:v3.19.0 - - gcr.io/spectro-images-public/calico/cni:v3.19.0 - - gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0 + - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0 + - image: gcr.io/spectro-images-public/calico/node:v3.19.0 + - image: gcr.io/spectro-images-public/calico/cni:v3.19.0 + - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0 manifests: calico: @@ -178,14 +178,14 @@ packs: pack: content: images: - - k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 - - k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 - - k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 - - k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 - - k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 - - quay.io/cephcsi/cephcsi:v3.4.0 - - quay.io/ceph/ceph:v16.2.7 - - docker.io/rook/ceph:v1.8.0 + - image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 + - image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 + - image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + - image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + - image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 + - image: quay.io/cephcsi/cephcsi:v3.4.0 + - image: quay.io/ceph/ceph:v16.2.7 + - image: docker.io/rook/ceph:v1.8.0 manifests: storageclass: diff --git a/examples/libvirt/config/profile/profile-system.yaml b/examples/libvirt/config/profile/profile-system.yaml index 721675e..ab1bd43 100644 --- a/examples/libvirt/config/profile/profile-system.yaml +++ b/examples/libvirt/config/profile/profile-system.yaml @@ -1,4 +1,4 @@ -name: system-profile +name: system-profile-simyam-libvirt description: system-profile type: system cloudType: all diff --git a/examples/libvirt/mod_spectro_org.tf b/examples/libvirt/mod_spectro_org.tf index 46e694d..002c102 100644 --- a/examples/libvirt/mod_spectro_org.tf +++ b/examples/libvirt/mod_spectro_org.tf @@ -1,7 +1,7 @@ locals { - accounts_params = { ACCOUNT_DEV_NAME = "dev-030", ACCOUNT_PROD_NAME = "prod-004" } + accounts_params = { ACCOUNT_DEV_NAME = "dev-030", ACCOUNT_PROD_NAME = "prod-004" } appliances_params = {} - bsl_params = { BSL_NAME = "qa-sharma" } + bsl_params = { BSL_NAME = "qa-sharma" } profile_params = { 
SPECTRO_REPO_URL = "https://registry.spectrocloud.com", REPO_URL = "593235963820.dkr.ecr.us-west-2.amazonaws.com", @@ -18,25 +18,20 @@ locals { module "SpectroOrg" { source = "../../" - accounts = { + /*accounts = { for k in fileset("config/account", "account-*.yaml") : trimsuffix(k, ".yaml") => yamldecode(templatefile("config/account/${k}", local.accounts_params)) - } - - appliances = { - for k in fileset("config/appliance", "appliance-*.yaml") : - trimsuffix(k, ".yaml") => yamldecode(templatefile("config/appliance/${k}", local.appliances_params)) - } + }*/ /*bsls = { for k in fileset("config/bsl", "bsl-*.yaml") : trimsuffix(k, ".yaml") => yamldecode(templatefile("config/bsl/${k}", local.bsl_params)) }*/ - /*profiles = { + profiles = { for k in fileset("config/profile", "profile-*.yaml") : trimsuffix(k, ".yaml") => yamldecode(templatefile("config/profile/${k}", local.profile_params)) - }*/ + } /*projects = { for k in fileset("config/project", "project-*.yaml") : @@ -55,6 +50,18 @@ module "SpectroOrg" { } +module "SpectroAppliances" { + depends_on = [module.SpectroOrg] + + source = "../../" + + /*appliances = { + for k in fileset("config/appliance", "appliance-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(templatefile("config/appliance/${k}", local.appliances_params)) + }*/ + +} + /* output "debug" { value = module.SpectroProject.libvirt-cluster @@ -62,11 +69,11 @@ output "debug" { */ module "SpectroProject" { - depends_on = [module.SpectroOrg] - source = "../../" + depends_on = [module.SpectroAppliances] + source = "../../" - /*clusters = { + clusters = { for k in fileset("config/cluster", "cluster-*.yaml") : trimsuffix(k, ".yaml") => yamldecode(templatefile("config/cluster/${k}", local.accounts_params)) - }*/ + } } diff --git a/examples/local-simplified-yaml/admin/README.md b/examples/local-simplified-yaml/admin/README.md new file mode 100644 index 0000000..881d0e4 --- /dev/null +++ b/examples/local-simplified-yaml/admin/README.md @@ -0,0 +1,6 @@ +# gitops-admin + +Copy `terraform.template.tfvars` file to `terraform.tfvars` and key in Spectro Cloud credentials and other details. +Make sure all files for project, profile have correct values and then run terraform. + +While copying files, just copy files present in this repo. \ No newline at end of file diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-bm-infra.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-bm-infra.yaml new file mode 100644 index 0000000..176907f --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/profile/profile-bm-infra.yaml @@ -0,0 +1,613 @@ +name: bm-infra +description: "" +type: cluster +cloudType: libvirt +packs: + - name: sles-libvirt + type: spectro + layer: os + registry: Public Repo + version: 15.3 + tag: 15.3 + values: |- + kubeadmconfig: + preKubeadmCommands: + - echo "Executing pre kube admin config commands" + - update-ca-certificates + - 'systemctl restart containerd; sleep 3' + - 'while [ ! 
-S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' + postKubeadmCommands: + - echo "Executing post kube admin config commands" + files: + - targetPath: /usr/share/pki/trust/anchors/ca.crt + targetOwner: "root:root" + targetPermissions: "0644" + content: | + -----BEGIN CERTIFICATE----- + MIIDozCCAougAwIBAgIQeO8XlqAMLhxvtCap35yktzANBgkqhkiG9w0BAQsFADBS + MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYR2VuZXJhbCBFbGVjdHJpYyBDb21wYW55 + MSAwHgYDVQQDExdHRSBFeHRlcm5hbCBSb290IENBIDIuMTAeFw0xNTAzMDUwMDAw + MDBaFw0zNTAzMDQyMzU5NTlaMFIxCzAJBgNVBAYTAlVTMSEwHwYDVQQKExhHZW5l + cmFsIEVsZWN0cmljIENvbXBhbnkxIDAeBgNVBAMTF0dFIEV4dGVybmFsIFJvb3Qg + Q0EgMi4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzCzT4wNRZtr2 + XTzoTMjppjulZfG35/nOt44q2zg47sxwgZ8o4qjcrwzIhsntoFrRQssjXSF5qXdC + zsm1G7f04qEBimuOH/X+CidWX+sudCS8VyRjXi9cyvUW4/mYKCLXv5M6HhEoIHCD + Xdo6yUr5mSrf18qRR3yUFz0HYXopa2Ls3Q6lBvEUO2Xw04vqVvmg1h7S5jYuZovC + oIbd2+4QGdoSZPgtSNpCxSR+NwtPpzYZpmqiUuDGfVpO3HU42APB0c60D91cJho6 + tZpXYHDsR/RxYGm02K/iMGefD5F4YMrtoKoHbskty6+u5FUOrUgGATJJGtxleg5X + KotQYu8P1wIDAQABo3UwczASBgNVHRMBAf8ECDAGAQH/AgECMA4GA1UdDwEB/wQE + AwIBBjAuBgNVHREEJzAlpCMwITEfMB0GA1UEAxMWR0UtUm9vdC1DT00tUlNBLTIw + NDgtMTAdBgNVHQ4EFgQU3N2mUCJBCLYgtpZyxBeBMJwNZuowDQYJKoZIhvcNAQEL + BQADggEBACF4Zsf2Nm0FpVNeADUH+sl8mFgwL7dfL7+6n7hOgH1ZXcv6pDkoNtVE + 0J/ZPdHJW6ntedKEZuizG5BCclUH3IyYK4/4GxNpFXugmWnKGy2feYwVae7Puyd7 + /iKOFEGCYx4C6E2kq3aFjJqiq1vbgSS/B0agt1D3rH3i/+dXVxx8ZjhyZMuN+cgS + pZL4gnhnSXFAGissxJhKsNkYgvKdOETRNn5lEgfgVyP2iOVqEguHk2Gu0gHSouLu + 5ad/qyN+Zgbjx8vEWlywmhXb78Gaf/AwSGAwQPtmQ0310a4DulGxo/kcuS78vFH1 + mwJmHm9AIFoqBi8XpuhGmQ0nvymurEk= + -----END CERTIFICATE----- + - name: kubernetes + type: spectro + layer: k8s + registry: Public Repo + version: 1.21.10 + tag: 1.21.10 + values: |- + pack: + k8sHardening: True + #CIDR Range for Pods in cluster + # Note : This must not overlap with any of the host or service network + podCIDR: "172.10.0.0/16" + #CIDR notation IP range from which to assign service cluster IPs + # Note : This must not overlap with any IP ranges assigned to nodes for pods. + serviceClusterIpRange: "11.0.0.0/22" + + # KubeAdm customization for kubernetes hardening. Below config will be ignored if k8sHardening property above is disabled + kubeadmconfig: + apiServer: + certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" + extraArgs: + # Note : secure-port flag is used during kubeadm init. 
Do not change this flag on a running cluster + secure-port: "6443" + anonymous-auth: "true" + insecure-port: "0" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port : "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/privileged-psp.yaml + targetPath: /etc/kubernetes/hardening/privileged-psp.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + preKubeadmCommands: + # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up + # Sometimes api server takes a little longer to respond. 
Retry if applying the pod-security-policy manifest fails + - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv + #oidc-extra-scope: profile,email + - name: cni-calico + type: spectro + layer: cni + registry: Public Repo + version: 3.19.0 + tag: 3.19.0 + values: |- + pack: + content: + images: + - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0 + - image: gcr.io/spectro-images-public/calico/node:v3.19.0 + - image: gcr.io/spectro-images-public/calico/cni:v3.19.0 + - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0 + + manifests: + calico: + + # IPAM type to use. Supported types are calico-ipam, host-local + ipamType: "calico-ipam" + + # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN + encapsulationType: "CALICO_IPV4POOL_IPIP" + + # Should be one of Always, CrossSubnet, Never + encapsulationMode: "Always" + - name: csi-rook-ceph + type: spectro + layer: csi + registry: Public Repo + version: 1.8.0 + tag: 1.8.0 + values: |- + pack: + content: + images: + - image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 + - image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 + - image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + - image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + - image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 + - image: quay.io/cephcsi/cephcsi:v3.4.0 + - image: quay.io/ceph/ceph:v16.2.7 + - image: docker.io/rook/ceph:v1.8.0 + + manifests: + storageclass: + contents: | + apiVersion: ceph.rook.io/v1 + kind: CephFilesystem + metadata: + name: myfs + namespace: rook-ceph # namespace:cluster + spec: + # The metadata pool spec. Must use replication. + metadataPool: + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: + none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # The list of data pool specs. Can use replication or erasure coding. + dataPools: + - name: replicated + failureDomain: host + replicated: + size: 3 + # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
+ # Make sure you're *ABSOLUTELY CERTAIN* that is what you want + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: + none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # Whether to preserve filesystem after CephFilesystem CRD deletion + preserveFilesystemOnDelete: true + # The metadata service (mds) configuration + metadataServer: + # The number of active MDS instances + activeCount: 1 + # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. + # If false, standbys will be available, but will not have a warm cache. + activeStandby: true + # The affinity rules to apply to the mds deployment + placement: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - mds-node + # topologySpreadConstraints: + # tolerations: + # - key: mds-node + # operator: Exists + # podAffinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: kubernetes.io/hostname will place MDS across different hosts + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: */zone can be used to spread MDS across different AZ + # Use in k8s cluster if your cluster is v1.16 or lower + # Use in k8s cluster is v1.17 or upper + topologyKey: topology.kubernetes.io/zone + # A key/value list of annotations + annotations: + # key: value + # A key/value list of labels + labels: + # key: value + resources: + # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory + # limits: + # cpu: "500m" + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + # priorityClassName: my-priority-class + # Filesystem mirroring settings + # mirroring: + # enabled: true + # list of Kubernetes Secrets containing the peer token + # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers + # peers: + #secretNames: + #- secondary-cluster-peer + # specify the schedule(s) on which snapshots should be taken + # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules + # snapshotSchedules: + # - path: / + # interval: 24h # daily snapshots + # startTime: 11:55 + # manage retention policies + # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies + # snapshotRetention: + # - path: / + # duration: "h 24" + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: standard + annotations: + storageclass.kubernetes.io/is-default-class: "true" + # Change "rook-ceph" provisioner prefix to match the operator namespace if needed + provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator + parameters: + # clusterID is the namespace where the rook cluster is running + # If 
you change this namespace, also change the namespace below where the secret namespaces are defined + clusterID: rook-ceph # namespace:cluster + + # CephFS filesystem name into which the volume shall be created + fsName: myfs + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: myfs-data0 + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster + + # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) + # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse + # or by setting the default mounter explicitly via --volumemounter command-line argument. + # mounter: kernel + reclaimPolicy: Delete + allowVolumeExpansion: true + #Supported binding modes are Immediate, WaitForFirstConsumer + volumeBindingMode: "WaitForFirstConsumer" + mountOptions: + # uncomment the following line for debugging + #- debug + + cluster: + contents: | + apiVersion: ceph.rook.io/v1 + kind: CephCluster + metadata: + name: rook-ceph + namespace: rook-ceph # namespace:cluster + spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v15 is octopus, and v16 is pacific. + # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.7-20211208 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: quay.io/ceph/ceph:v16.2.7 + # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported. + # Future versions such as `pacific` would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. Must be specified. + # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
+ dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades + skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. + # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one + # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would + # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. + # The default wait timeout is 10 minutes. + waitTimeoutForHealthyOSDInMinutes: 10 + mon: + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + mgr: + # When higher availability of the mgr is needed, increase the count to 2. + # In that case, one mgr will be active and one in standby. When Ceph updates which + # mgr is active, Rook will update the mgr services to match the active mgr. + count: 1 + modules: + # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules + # are already enabled by other settings in the cluster CR. + - name: pg_autoscaler + enabled: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + ssl: true + # enable prometheus alerting for cluster + monitoring: + # requires Prometheus to be pre-installed + enabled: false + # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used. + # Recommended: + # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty. + # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus + # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions. + rulesNamespace: rook-ceph + network: + # enable host networking + #provider: host + # enable the Multus network provider + #provider: multus + #selectors: + # The selector keys are required to be `public` and `cluster`. + # Based on the configuration, the operator will do the following: + # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # 2. 
if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # + # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # + #public: public-conf --> NetworkAttachmentDefinition object name in Multus + #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + #ipFamily: "IPv6" + # Ceph daemons to listen on both IPv4 and Ipv6 networks + #dualStack: false + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # Uncomment daysToRetain to prune ceph crash entries older than the + # specified number of days. + #daysToRetain: 30 + # enable log collector, daemons will log on files and rotate + # logCollector: + # enabled: true + # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. + confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both case, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicate where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time then the zero source + dataSource: zero + # iteration overwrite N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. + # placement: + # all: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - storage-node + # podAffinity: + # podAntiAffinity: + # topologySpreadConstraints: + # tolerations: + # - key: storage-node + # operator: Exists + # The above placement information can also be specified for mon, osd, and mgr components + # mon: + # Monitor deployments may contain an anti-affinity rule for avoiding monitor + # collocation on the same node. This is a required rule when host network is used + # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a + # preferred rule with weight: 50. 
+ # osd: + # mgr: + # cleanup: + annotations: + # all: + # mon: + # osd: + # cleanup: + # prepareosd: + # If no mgr annotations are set, prometheus scrape annotations will be set by default. + # mgr: + labels: + # all: + # mon: + # osd: + # cleanup: + # mgr: + # prepareosd: + # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. + # These labels can be passed as LabelSelector to Prometheus + # monitoring: + # crashcollector: + resources: + # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory + # mgr: + # limits: + # cpu: "500m" + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + # The above example requests/limits can also be added to the other components + # mon: + # osd: + # For OSD it also is a possible to specify requests/limits based on device class + # osd-hdd: + # osd-ssd: + # osd-nvme: + # prepareosd: + # mgr-sidecar: + # crashcollector: + # logcollector: + # cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: true + # priorityClassNames: + # all: rook-ceph-default-priority-class + # mon: rook-ceph-mon-priority-class + # osd: rook-ceph-osd-priority-class + # mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: true + #deviceFilter: + config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" + # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named + # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. + # nodes: + # - name: "172.17.4.201" + # devices: # specific devices to use for storage can be specified for each node + # - name: "sdb" + # - name: "nvme01" # multiple osds can be created on high performance devices + # config: + # osdsPerDevice: "5" + # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths + # config: # configuration can be specified at the node level which overrides the cluster level config + # - name: "172.17.4.301" + # deviceFilter: "^sd." + # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd + onlyApplyOSDPlacement: false + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. 
+ managePodBudgets: true + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. + osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. + # Only available on OpenShift. + manageMachineDisruptionBudgets: false + # Namespace in which to watch for the MachineDisruptionBudgets. + machineDisruptionBudgetNamespace: openshift-machine-api + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon,mgr,osd daemons + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false \ No newline at end of file diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x b/examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x new file mode 100644 index 0000000..6111a62 --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x @@ -0,0 +1,277 @@ +name: ehl-apps +description: "" +type: add-on +packs: +- name: ehl-generic + type: manifest + layer: addon + registry: helm-blr-ees + manifests: + - name: namespaces + content: |- + --- + ########################### + # NAME SPACES CREATION + ############################ + apiVersion: v1 + kind: Namespace + metadata: + name: edison-system + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ehl-control-pplane + --- + apiVersion: v1 + kind: Namespace + metadata: + name: edison-core + --- + apiVersion: v1 + kind: Namespace + metadata: + name: edison-policy + --- + apiVersion: v1 + kind: Namespace + metadata: + name: kubeaddons + --- + apiVersion: v1 + kind: Namespace + metadata: + name: edison-priority-scheduler + --- + - name: configmap + content: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: ehl-generic-map + namespace: ehl-control-plane + data: + bootstrap_host: 192.168.100.10 + bootstrap_pswd: Minda00$ + bootstrap_user: root + ehl_version: EHL-2.0-SC-dev + version: 1.0.0 + values: |- + pack: + spectrocloud.com/install-priority: "-100" +- name: ehl-monitoring + type: helm + layer: addon + registry: helm-blr-ees + version: 2.0.1-af15864 + tag: 2.0.1-af15864 + values: |- + pack: + namespace: "edison-system" + spectrocloud.com/install-priority: "110" + releaseNameOverride: + ehl-monitoring: ehl-monitoring + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: ehl-monitoring + version: 2.0.1-af15864 + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/prometheus:v2.22.1 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/prometheus-operator:v0.44.0 + - image: 
blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/kube-webhook-certgen:v1.5.2 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/configmap-reload:v0.4.0 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/prometheus-config-reloader:v0.44.0 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/alertmanager:v0.21.0 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/kube-state-metrics:v1.9.7 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/node-exporter:v1.0.1 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-monitoring/chronyntpexporter:2.0.1-af15864 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-monitoring/validationwebhook:2.0.1-af15864 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-monitoring/ext_svc_config:2.0.1-af15864 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/blackbox-exporter:v0.18.0 + + external-svc-config: + enabled: false +- name: ehl-logging + type: helm + layer: addon + registry: helm-blr-ees + version: 0.0.1-9478efa + tag: 0.0.1-9478efa + values: |- + pack: + namespace: "edison-system" + spectrocloud.com/install-priority: "120" + releaseNameOverride: + ehl-logging: ehl-logging + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: ehl-logging + version: 0.0.1-9478efa + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/fluentd:1.12.4-debian-10-r3 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-logging/ehl-alpine-nginx:0.0.1-9478efa + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/fluent-bit-plugin-loki:2.0.0-amd64 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/loki:2.0.0 +- name: ehl-monitoring-security + type: helm + layer: addon + registry: helm-blr-ees + version: 0.0.1-30c63f5 + tag: 0.0.1-30c63f5 + values: |- + pack: + namespace: "edison-system" + spectrocloud.com/install-priority: "130" + releaseNameOverride: + ehl-monitoring-security: ehl-monitoring-security + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: ehl-monitoring-security + version: 0.0.1-30c63f5 +- name: ehl-grafana + type: helm + layer: addon + registry: helm-blr-ees + version: 0.0.1-4260767 + tag: 0.0.1-4260767 + values: |- + pack: + namespace: "edison-system" + spectrocloud.com/install-priority: "140" + releaseNameOverride: + ehl-grafana: ehl-grafana + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: ehl-grafana + version: 0.0.1-4260767 + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/grafana:8.3.4 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/k8s-sidecar:1.15.4 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/busybox:1.35.0 +- name: sprsnapshot-service + type: helm + layer: addon + registry: helm-blr-ees + version: 1.0.0-a7da622 + tag: 1.0.0-a7da622 + values: |- + pack: + namespace: "edison-system" + releaseNameOverride: + sprsnapshot-service: ehl-sprsnap-service + spectrocloud.com/install-priority: "150" + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: sprsnapshot-service + version: 1.0.0-a7da622 + images: + - image: 
blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-sprsnap-service/ehl-sprsnap-service:1.0.0-a7da622 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-sprsnap-service/ehl-alpine-nginx-sprsnap:1.0.0-a7da622 +- name: ehl-metacontroller + type: helm + layer: addon + registry: helm-blr-ees + version: 0.0.1-aa67f86 + tag: 0.0.1-aa67f86 + values: |- + pack: + namespace: "edison-system" + releaseNameOverride: + ehl-metacontroller: ehl-metacontroller + spectrocloud.com/install-priority: "170" + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: ehl-metacontroller + version: 0.0.1-aa67f86 + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/metacontrollerio/metacontroller:v1.0.3 +- name: eis-postgres + type: helm + layer: addon + registry: helm-blr-ees + version: 2.0.0-d2433f4 + tag: 2.0.0-d2433f4 + values: |- + pack: + namespace: "edison-system" + releaseNameOverride: + eis-postgres: eis-common-postgres + spectrocloud.com/install-priority: "180" + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/ + name: eis-postgres + version: 2.0.0-d2433f4 + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/postgres-operator:v1.6.0 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/spilo-12:1.6-p5 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/precheck:2.0.0-d2433f4 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/post_delete_hook:2.0.0-d2433f4 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/wrouesnel/postgres_exporter:v0.8.0 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/postgres_import_export:2.0.0-d2433f4 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/webhook:2.0.0-d2433f4 + + postgres-operator: + enabled: true + eisDicomRsDb: + enabled: false + postgres-import-export: + enabled: true + postgres-monitoring: + enabled: true + eespostgresaccount: + enabled: true +- name: edison-priority-scheduler + type: helm + layer: addon + registry: helm-blr-ees + version: 1.0.0-654a62d + tag: 1.0.0-654a62d + values: |- + pack: + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all + name: edison-priority-scheduler + version: 1.0.0-654a62d + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/edison-priority-scheduler/edison-priority-scheduler:1.0.0-654a62d + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-redis/redis:6.2.5 + - hc-eu-west-aws-artifactory.cloud.health.ge.com/docker-eis-dev/eps-test-client:latest + namespace: "edison-priority-scheduler" + releaseNameOverride: + edison-priority-scheduler: edison-priority-scheduler + spectrocloud.com/install-priority: "200" + + global: + resource_config: + manual: true + prometheusRules: + enabled: false +- name: eis-dicom-postgres--eis-postgres + type: helm + layer: addon + registry: helm-blr-ees + version: 2.0.0-d2433f4 + tag: 2.0.0-d2433f4 + values: "pack:\n namespace: \"edison-system\"\n releaseNameOverride:\n eis-postgres: + eis-dicom-postgres\n spectrocloud.com/install-priority: \"700\"\n spectrocloud.com/display-name: + \"eis-dicom-postgres\"\n content:\n charts:\n - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/\n + \ name: eis-postgres\n version: 2.0.0-d2433f4\n images:\n - + image: 
blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/postgres-operator:v1.6.0\n + \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/spilo-12:1.6-p5\n + \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/precheck:2.0.0-d2433f4\n + \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/post_delete_hook:2.0.0-d2433f4\n + \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/wrouesnel/postgres_exporter:v0.8.0\n + \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/postgres_import_export:2.0.0-d2433f4\n + \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/webhook:2.0.0-d2433f4 + \n \n postgres-operator:\n enabled: false\n eisDicomRsDb:\n enabled: + true\n postgres-import-export:\n enabled: false\n postgres-monitoring:\n + \ enabled: false\n eespostgresaccount:\n enabled: false" +cloudType: all \ No newline at end of file diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x b/examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x new file mode 100644 index 0000000..27ae982 --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x @@ -0,0 +1,55 @@ +name: ehl-core +description: "" +type: add-on +packs: +- name: bootstrap + type: helm + layer: addon + registry: helm-blr-ees + version: 1.0.0-7ff1498 + tag: 1.0.0-7ff1498 + values: |- + pack: + namespace: "ehl-control-plane" + spectrocloud.com/install-priority: "10" + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all + name: bootstrap + version: 1.0.0-7ff1498 +- name: clustermgr-service + type: helm + layer: addon + registry: helm-blr-ees + version: 1.0.0-f4a4859 + tag: 1.0.0-f4a4859 + values: |- + pack: + namespace: "ehl-control-plane" + spectrocloud.com/install-priority: "20" + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all + name: clustermgr-service + version: 1.0.0-f4a4859 + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/cluster-mgr/nginx_sidecar:1.0.0-f4a4859 + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/cluster-mgr/cluster-mgr:1.0.0-f4a4859 +- name: host-service + type: helm + layer: addon + registry: helm-blr-ees + version: 1.0.0-bcd39e0 + tag: 1.0.0-bcd39e0 + values: |- + pack: + namespace: "ehl-control-plane" + spectrocloud.com/install-priority: "30" + content: + charts: + - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all + name: host-service + version: 1.0.0-bcd39e0 + images: + - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-host-service/ehl-host-service:1.0.0-bcd39e0 +cloudType: all \ No newline at end of file diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml new file mode 100644 index 0000000..d3bb314 --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml @@ -0,0 +1,50 @@ +name: spectro-core +description: spectro-core +type: add-on +cloudType: all +packs: + - name: "spectro-proxy" + registry: Public Repo + type: "spectro" + layer: "addon" + version: "1.0.0" + values: | + pack: + spectrocloud.com/install-priority: "-200" + content: + images: + - image: gcr.io/spectro-images-public/release/frpc:v1.0.0 + - image: 
gcr.io/spectro-images-public/release/frpc-init:v1.0.0 + artifacts: + - source: https://rishi-public-bucket.s3.us-west-2.amazonaws.com/content/web/spectro.png + + + manifests: + spectro-proxy: + namespace: "cluster-{{ .spectro.system.cluster.uid }}" + server: "{{ .spectro.system.reverseproxy.server }}" + clusterUid: "{{ .spectro.system.cluster.uid }}" + subdomain: "cluster-{{ .spectro.system.cluster.uid }}" + + - name: "lb-metallb" + registry: Public Repo + type: "spectro" + layer: "addon" + version: "0.11.0" + values: | + pack: + spectrocloud.com/install-priority: "0" + content: + images: + - image: quay.io/metallb/controller:v0.11.0 + - image: quay.io/metallb/speaker:v0.11.0 + manifests: + metallb: + #The namespace to use for deploying MetalLB + namespace: "metallb-system" + #MetalLB will skip setting .0 & .255 IP address when this flag is enabled + avoidBuggyIps: true + # Layer 2 config; The IP address range MetalLB should use while assigning IP's for svc type LoadBalancer + # For the supported formats, check https://metallb.universe.tf/configuration/#layer-2-configuration + addresses: + - 192.168.100.245-192.168.100.254 \ No newline at end of file diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml new file mode 100644 index 0000000..0281bcc --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml @@ -0,0 +1,206 @@ +name: vsphere-with-creds-infra +description: "" +type: cluster +cloudType: vsphere +packs: + - name: sles-vsphere + type: spectro + layer: os + registry: Public Repo + version: 15.3 + tag: 15.3 + values: |- + kubeadmconfig: + preKubeadmCommands: + - echo "Executing pre kube admin config commands" + - update-ca-certificates + - 'systemctl restart containerd; sleep 3' + - 'while [ ! 
-S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' + postKubeadmCommands: + - echo "Executing post kube admin config commands" + files: + - targetPath: /usr/share/pki/trust/anchors/ca.crt + targetOwner: "root:root" + targetPermissions: "0644" + content: | + -----BEGIN CERTIFICATE----- + MIIDozCCAougAwIBAgIQeO8XlqAMLhxvtCap35yktzANBgkqhkiG9w0BAQsFADBS + MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYR2VuZXJhbCBFbGVjdHJpYyBDb21wYW55 + MSAwHgYDVQQDExdHRSBFeHRlcm5hbCBSb290IENBIDIuMTAeFw0xNTAzMDUwMDAw + MDBaFw0zNTAzMDQyMzU5NTlaMFIxCzAJBgNVBAYTAlVTMSEwHwYDVQQKExhHZW5l + cmFsIEVsZWN0cmljIENvbXBhbnkxIDAeBgNVBAMTF0dFIEV4dGVybmFsIFJvb3Qg + Q0EgMi4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzCzT4wNRZtr2 + XTzoTMjppjulZfG35/nOt44q2zg47sxwgZ8o4qjcrwzIhsntoFrRQssjXSF5qXdC + zsm1G7f04qEBimuOH/X+CidWX+sudCS8VyRjXi9cyvUW4/mYKCLXv5M6HhEoIHCD + Xdo6yUr5mSrf18qRR3yUFz0HYXopa2Ls3Q6lBvEUO2Xw04vqVvmg1h7S5jYuZovC + oIbd2+4QGdoSZPgtSNpCxSR+NwtPpzYZpmqiUuDGfVpO3HU42APB0c60D91cJho6 + tZpXYHDsR/RxYGm02K/iMGefD5F4YMrtoKoHbskty6+u5FUOrUgGATJJGtxleg5X + KotQYu8P1wIDAQABo3UwczASBgNVHRMBAf8ECDAGAQH/AgECMA4GA1UdDwEB/wQE + AwIBBjAuBgNVHREEJzAlpCMwITEfMB0GA1UEAxMWR0UtUm9vdC1DT00tUlNBLTIw + NDgtMTAdBgNVHQ4EFgQU3N2mUCJBCLYgtpZyxBeBMJwNZuowDQYJKoZIhvcNAQEL + BQADggEBACF4Zsf2Nm0FpVNeADUH+sl8mFgwL7dfL7+6n7hOgH1ZXcv6pDkoNtVE + 0J/ZPdHJW6ntedKEZuizG5BCclUH3IyYK4/4GxNpFXugmWnKGy2feYwVae7Puyd7 + /iKOFEGCYx4C6E2kq3aFjJqiq1vbgSS/B0agt1D3rH3i/+dXVxx8ZjhyZMuN+cgS + pZL4gnhnSXFAGissxJhKsNkYgvKdOETRNn5lEgfgVyP2iOVqEguHk2Gu0gHSouLu + 5ad/qyN+Zgbjx8vEWlywmhXb78Gaf/AwSGAwQPtmQ0310a4DulGxo/kcuS78vFH1 + mwJmHm9AIFoqBi8XpuhGmQ0nvymurEk= + -----END CERTIFICATE----- + - name: kubernetes + type: spectro + layer: k8s + registry: Public Repo + version: 1.21.10 + tag: 1.21.10 + values: |- + pack: + k8sHardening: True + #CIDR Range for Pods in cluster + # Note : This must not overlap with any of the host or service network + podCIDR: "172.30.0.0/16" + #CIDR notation IP range from which to assign service cluster IPs + # Note : This must not overlap with any IP ranges assigned to nodes for pods. + serviceClusterIpRange: "11.0.0.0/22" + + # KubeAdm customization for kubernetes hardening. Below config will be ignored if k8sHardening property above is disabled + kubeadmconfig: + apiServer: + certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" + extraArgs: + # Note : secure-port flag is used during kubeadm init. 
Do not change this flag on a running cluster + secure-port: "6443" + anonymous-auth: "true" + insecure-port: "0" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port : "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/privileged-psp.yaml + targetPath: /etc/kubernetes/hardening/privileged-psp.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + preKubeadmCommands: + # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up + # Sometimes api server takes a little longer to respond. 
Retry if applying the pod-security-policy manifest fails + - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv + #oidc-extra-scope: profile,email + - name: cni-calico + type: spectro + layer: cni + registry: Public Repo + version: 3.19.0 + tag: 3.19.0 + values: |- + pack: + content: + images: + - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0 + - image: gcr.io/spectro-images-public/calico/node:v3.19.0 + - image: gcr.io/spectro-images-public/calico/cni:v3.19.0 + - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0 + + manifests: + calico: + + # IPAM type to use. Supported types are calico-ipam, host-local + ipamType: "calico-ipam" + + # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN + encapsulationType: "CALICO_IPV4POOL_IPIP" + + # Should be one of Always, CrossSubnet, Never + encapsulationMode: "Always" + + - name: csi-vsphere-csi + type: spectro + layer: csi + registry: Public Repo + version: 2.3.0 + tag: 2.3.0 + values: |- + pack: + content: + images: + - image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.0 + - image: quay.io/k8scsi/csi-resizer:v1.1.0 + - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.3.0 + + manifests: + #Storage class config + vsphere: + + #Toggle for Default class + isDefaultClass: "false" + + #Specifies file system type + fstype: "ext4" + + #Allowed reclaim policies are Delete, Retain + reclaimPolicy: "Delete" + + #Specifies the URL of the datastore on which the container volume needs to be provisioned. + datastoreURL: "" + + #Specifies the storage policy for datastores on which the container volume needs to be provisioned. + storagePolicyName: "" + + volumeBindingMode: "WaitForFirstConsumer" + + #Set this flag to true to enable volume expansion + allowVolumeExpansion: true + + vsphere-cloud-controller-manager: + k8sVersion: "{{ .spectro.system.kubernetes.version }}" diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml new file mode 100644 index 0000000..fd6fe75 --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml @@ -0,0 +1,617 @@ +name: without-creds-infra +description: "" +type: cluster +cloudType: edge +packs: + - name: sles-edge + type: spectro + layer: os + registry: Public Repo + version: 15.3 + tag: 15.3 + values: |- + kubeadmconfig: + preKubeadmCommands: + - echo "Executing pre kube admin config commands" + - update-ca-certificates + - 'systemctl restart containerd; sleep 3' + - 'while [ ! 
-S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' + postKubeadmCommands: + - echo "Executing post kube admin config commands" + files: + - targetPath: /usr/share/pki/trust/anchors/ca.crt + targetOwner: "root:root" + targetPermissions: "0644" + content: | + -----BEGIN CERTIFICATE----- + MIIDozCCAougAwIBAgIQeO8XlqAMLhxvtCap35yktzANBgkqhkiG9w0BAQsFADBS + MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYR2VuZXJhbCBFbGVjdHJpYyBDb21wYW55 + MSAwHgYDVQQDExdHRSBFeHRlcm5hbCBSb290IENBIDIuMTAeFw0xNTAzMDUwMDAw + MDBaFw0zNTAzMDQyMzU5NTlaMFIxCzAJBgNVBAYTAlVTMSEwHwYDVQQKExhHZW5l + cmFsIEVsZWN0cmljIENvbXBhbnkxIDAeBgNVBAMTF0dFIEV4dGVybmFsIFJvb3Qg + Q0EgMi4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzCzT4wNRZtr2 + XTzoTMjppjulZfG35/nOt44q2zg47sxwgZ8o4qjcrwzIhsntoFrRQssjXSF5qXdC + zsm1G7f04qEBimuOH/X+CidWX+sudCS8VyRjXi9cyvUW4/mYKCLXv5M6HhEoIHCD + Xdo6yUr5mSrf18qRR3yUFz0HYXopa2Ls3Q6lBvEUO2Xw04vqVvmg1h7S5jYuZovC + oIbd2+4QGdoSZPgtSNpCxSR+NwtPpzYZpmqiUuDGfVpO3HU42APB0c60D91cJho6 + tZpXYHDsR/RxYGm02K/iMGefD5F4YMrtoKoHbskty6+u5FUOrUgGATJJGtxleg5X + KotQYu8P1wIDAQABo3UwczASBgNVHRMBAf8ECDAGAQH/AgECMA4GA1UdDwEB/wQE + AwIBBjAuBgNVHREEJzAlpCMwITEfMB0GA1UEAxMWR0UtUm9vdC1DT00tUlNBLTIw + NDgtMTAdBgNVHQ4EFgQU3N2mUCJBCLYgtpZyxBeBMJwNZuowDQYJKoZIhvcNAQEL + BQADggEBACF4Zsf2Nm0FpVNeADUH+sl8mFgwL7dfL7+6n7hOgH1ZXcv6pDkoNtVE + 0J/ZPdHJW6ntedKEZuizG5BCclUH3IyYK4/4GxNpFXugmWnKGy2feYwVae7Puyd7 + /iKOFEGCYx4C6E2kq3aFjJqiq1vbgSS/B0agt1D3rH3i/+dXVxx8ZjhyZMuN+cgS + pZL4gnhnSXFAGissxJhKsNkYgvKdOETRNn5lEgfgVyP2iOVqEguHk2Gu0gHSouLu + 5ad/qyN+Zgbjx8vEWlywmhXb78Gaf/AwSGAwQPtmQ0310a4DulGxo/kcuS78vFH1 + mwJmHm9AIFoqBi8XpuhGmQ0nvymurEk= + -----END CERTIFICATE----- + - name: kubernetes + type: spectro + layer: k8s + registry: Public Repo + version: 1.21.10 + tag: 1.21.10 + values: |- + pack: + k8sHardening: True + #CIDR Range for Pods in cluster + # Note : This must not overlap with any of the host or service network + podCIDR: "172.10.0.0/16" + #CIDR notation IP range from which to assign service cluster IPs + # Note : This must not overlap with any IP ranges assigned to nodes for pods. + serviceClusterIpRange: "11.0.0.0/22" + content: + images: + - image: gcr.io/spectro-images-public/release/edge/node:s-153-0-k-12110-0 + + # KubeAdm customization for kubernetes hardening. Below config will be ignored if k8sHardening property above is disabled + kubeadmconfig: + apiServer: + certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" + extraArgs: + # Note : secure-port flag is used during kubeadm init. 
Do not change this flag on a running cluster + secure-port: "6443" + anonymous-auth: "true" + insecure-port: "0" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port : "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/privileged-psp.yaml + targetPath: /etc/kubernetes/hardening/privileged-psp.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + preKubeadmCommands: + # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up + # Sometimes api server takes a little longer to respond. 
Retry if applying the pod-security-policy manifest fails + - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv + #oidc-extra-scope: profile,email + - name: cni-calico + type: spectro + layer: cni + registry: Public Repo + version: 3.19.0 + tag: 3.19.0 + values: |- + pack: + content: + images: + - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0 + - image: gcr.io/spectro-images-public/calico/node:v3.19.0 + - image: gcr.io/spectro-images-public/calico/cni:v3.19.0 + - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0 + + manifests: + calico: + + # IPAM type to use. Supported types are calico-ipam, host-local + ipamType: "calico-ipam" + + # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN + encapsulationType: "CALICO_IPV4POOL_IPIP" + + # Should be one of Always, CrossSubnet, Never + encapsulationMode: "Always" + - name: csi-rook-ceph + type: spectro + layer: csi + registry: Public Repo + version: 1.8.0 + tag: 1.8.0 + values: |- + pack: + content: + images: + - image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 + - image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0 + - image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0 + - image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0 + - image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0 + - image: quay.io/cephcsi/cephcsi:v3.4.0 + - image: quay.io/ceph/ceph:v16.2.7 + - image: docker.io/rook/ceph:v1.8.0 + + + manifests: + storageclass: + contents: | + apiVersion: ceph.rook.io/v1 + kind: CephFilesystem + metadata: + name: myfs + namespace: rook-ceph # namespace:cluster + spec: + # The metadata pool spec. Must use replication. + metadataPool: + replicated: + size: 3 + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: + none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # The list of data pool specs. Can use replication or erasure coding. + dataPools: + - name: replicated + failureDomain: host + replicated: + size: 3 + # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
+ # Make sure you're *ABSOLUTELY CERTAIN* that is what you want + requireSafeReplicaSize: true + parameters: + # Inline compression mode for the data pool + # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression + compression_mode: + none + # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool + # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size + #target_size_ratio: ".5" + # Whether to preserve filesystem after CephFilesystem CRD deletion + preserveFilesystemOnDelete: true + # The metadata service (mds) configuration + metadataServer: + # The number of active MDS instances + activeCount: 1 + # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover. + # If false, standbys will be available, but will not have a warm cache. + activeStandby: true + # The affinity rules to apply to the mds deployment + placement: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - mds-node + # topologySpreadConstraints: + # tolerations: + # - key: mds-node + # operator: Exists + # podAffinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: kubernetes.io/hostname will place MDS across different hosts + topologyKey: kubernetes.io/hostname + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rook-ceph-mds + # topologyKey: */zone can be used to spread MDS across different AZ + # Use in k8s cluster if your cluster is v1.16 or lower + # Use in k8s cluster is v1.17 or upper + topologyKey: topology.kubernetes.io/zone + # A key/value list of annotations + annotations: + # key: value + # A key/value list of labels + labels: + # key: value + resources: + # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory + # limits: + # cpu: "500m" + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + # priorityClassName: my-priority-class + # Filesystem mirroring settings + # mirroring: + # enabled: true + # list of Kubernetes Secrets containing the peer token + # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers + # peers: + #secretNames: + #- secondary-cluster-peer + # specify the schedule(s) on which snapshots should be taken + # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules + # snapshotSchedules: + # - path: / + # interval: 24h # daily snapshots + # startTime: 11:55 + # manage retention policies + # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies + # snapshotRetention: + # - path: / + # duration: "h 24" + --- + apiVersion: storage.k8s.io/v1 + kind: StorageClass + metadata: + name: standard + annotations: + storageclass.kubernetes.io/is-default-class: "true" + # Change "rook-ceph" provisioner prefix to match the operator namespace if needed + provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator + parameters: + # clusterID is the namespace where the rook cluster is running + # If 
you change this namespace, also change the namespace below where the secret namespaces are defined + clusterID: rook-ceph # namespace:cluster + + # CephFS filesystem name into which the volume shall be created + fsName: myfs + + # Ceph pool into which the volume shall be created + # Required for provisionVolume: "true" + pool: myfs-data0 + + # The secrets contain Ceph admin credentials. These are generated automatically by the operator + # in the same namespace as the cluster. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster + + # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel) + # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse + # or by setting the default mounter explicitly via --volumemounter command-line argument. + # mounter: kernel + reclaimPolicy: Delete + allowVolumeExpansion: true + #Supported binding modes are Immediate, WaitForFirstConsumer + volumeBindingMode: "WaitForFirstConsumer" + mountOptions: + # uncomment the following line for debugging + #- debug + + cluster: + contents: | + apiVersion: ceph.rook.io/v1 + kind: CephCluster + metadata: + name: rook-ceph + namespace: rook-ceph # namespace:cluster + spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v15 is octopus, and v16 is pacific. + # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.7-20211208 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: quay.io/ceph/ceph:v16.2.7 + # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported. + # Future versions such as `pacific` would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. Must be specified. + # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
+ dataDirHostPath: /var/lib/rook + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades + skipUpgradeChecks: false + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. + # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one + # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would + # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. + # The default wait timeout is 10 minutes. + waitTimeoutForHealthyOSDInMinutes: 10 + mon: + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + mgr: + # When higher availability of the mgr is needed, increase the count to 2. + # In that case, one mgr will be active and one in standby. When Ceph updates which + # mgr is active, Rook will update the mgr services to match the active mgr. + count: 1 + modules: + # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules + # are already enabled by other settings in the cluster CR. + - name: pg_autoscaler + enabled: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + ssl: true + # enable prometheus alerting for cluster + monitoring: + # requires Prometheus to be pre-installed + enabled: false + # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used. + # Recommended: + # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty. + # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus + # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions. + rulesNamespace: rook-ceph + network: + # enable host networking + #provider: host + # enable the Multus network provider + #provider: multus + #selectors: + # The selector keys are required to be `public` and `cluster`. + # Based on the configuration, the operator will do the following: + # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # 2. 
if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # + # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # + #public: public-conf --> NetworkAttachmentDefinition object name in Multus + #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + #ipFamily: "IPv6" + # Ceph daemons to listen on both IPv4 and Ipv6 networks + #dualStack: false + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # Uncomment daysToRetain to prune ceph crash entries older than the + # specified number of days. + #daysToRetain: 30 + # enable log collector, daemons will log on files and rotate + # logCollector: + # enabled: true + # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days. + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. + confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both case, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicate where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time then the zero source + dataSource: zero + # iteration overwrite N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. + # placement: + # all: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - storage-node + # podAffinity: + # podAntiAffinity: + # topologySpreadConstraints: + # tolerations: + # - key: storage-node + # operator: Exists + # The above placement information can also be specified for mon, osd, and mgr components + # mon: + # Monitor deployments may contain an anti-affinity rule for avoiding monitor + # collocation on the same node. This is a required rule when host network is used + # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a + # preferred rule with weight: 50. 
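+              # A minimal sketch, not part of the upstream Rook example (the node name "node-1" is
+              # hypothetical): for the commented 'all' placement above to match any hosts, the
+              # storage nodes must first carry the corresponding label and taint, e.g.
+              #   kubectl label node node-1 role=storage-node
+              #   kubectl taint node node-1 storage-node=true:NoSchedule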
+ # osd: + # mgr: + # cleanup: + annotations: + # all: + # mon: + # osd: + # cleanup: + # prepareosd: + # If no mgr annotations are set, prometheus scrape annotations will be set by default. + # mgr: + labels: + # all: + # mon: + # osd: + # cleanup: + # mgr: + # prepareosd: + # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. + # These labels can be passed as LabelSelector to Prometheus + # monitoring: + # crashcollector: + resources: + # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory + # mgr: + # limits: + # cpu: "500m" + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" + # The above example requests/limits can also be added to the other components + # mon: + # osd: + # For OSD it also is a possible to specify requests/limits based on device class + # osd-hdd: + # osd-ssd: + # osd-nvme: + # prepareosd: + # mgr-sidecar: + # crashcollector: + # logcollector: + # cleanup: + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: true + # priorityClassNames: + # all: rook-ceph-default-priority-class + # mon: rook-ceph-mon-priority-class + # osd: rook-ceph-osd-priority-class + # mgr: rook-ceph-mgr-priority-class + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: false + deviceFilter: ^sd[b-d] + config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" + # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named + # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. + # nodes: + # - name: "172.17.4.201" + # devices: # specific devices to use for storage can be specified for each node + # - name: "sdb" + # - name: "nvme01" # multiple osds can be created on high performance devices + # config: + # osdsPerDevice: "5" + # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths + # config: # configuration can be specified at the node level which overrides the cluster level config + # - name: "172.17.4.301" + # deviceFilter: "^sd." + # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd + onlyApplyOSDPlacement: false + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. 
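+          # A small verification sketch (assumption, not from this patch): the PodDisruptionBudgets
+          # managed by the operator can be listed with
+          #   kubectl -n rook-ceph get poddisruptionbudgets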
+ managePodBudgets: true + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. + osdMaintenanceTimeout: 30 + # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up. + # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`. + # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain. + pgHealthCheckTimeout: 0 + # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy. + # Only available on OpenShift. + manageMachineDisruptionBudgets: false + # Namespace in which to watch for the MachineDisruptionBudgets. + machineDisruptionBudgetNamespace: openshift-machine-api + + # healthChecks + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon,mgr,osd daemons + livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false diff --git a/examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml b/examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml new file mode 100644 index 0000000..91798f1 --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml @@ -0,0 +1,2 @@ +name: hospital-200 +description: "project for hospital 200" diff --git a/examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml b/examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml new file mode 100644 index 0000000..3e5f03c --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml @@ -0,0 +1,2 @@ +name: hospital-201 +description: "project for hospital 201" diff --git a/examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml b/examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml new file mode 100644 index 0000000..92f16e6 --- /dev/null +++ b/examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml @@ -0,0 +1,2 @@ +name: hospital-202 +description: "project for hospital 202" diff --git a/examples/local-simplified-yaml/admin/gitlab_project.tf b/examples/local-simplified-yaml/admin/gitlab_project.tf new file mode 100644 index 0000000..7582adc --- /dev/null +++ b/examples/local-simplified-yaml/admin/gitlab_project.tf @@ -0,0 +1,62 @@ +#locals { +# gitlab_project_ids = { +# for k, v in gitlab_project.this : +# v.name => v.id +# } +#} +# +#resource "gitlab_project" "this" { +# for_each = local.projects +# +# name = each.value.name +# description = each.value.description +# visibility_level = "public" # or 'private' +# pipelines_enabled = true +# shared_runners_enabled = true # shared runners means runners from different project can be used +# import_url = each.value.import_url +#} +# +#resource "gitlab_project_variable" "host" { +# for_each = local.projects +# +# project = local.gitlab_project_ids[each.value.name] +# key = "SC_HOST_DEV" +# value = 
var.sc_host +# protected = false +#} +# +#resource "gitlab_project_variable" "username" { +# for_each = local.projects +# +# project = local.gitlab_project_ids[each.value.name] +# key = "SC_USERNAME_DEV" +# value = var.sc_username +# protected = false +#} +# +#resource "gitlab_project_variable" "password" { +# for_each = local.projects +# +# project = local.gitlab_project_ids[each.value.name] +# key = "SC_PASSWORD_DEV" +# value = var.sc_password +# protected = false +#} +# +#resource "gitlab_project_variable" "project" { +# for_each = local.projects +# +# project = local.gitlab_project_ids[each.value.name] +# key = "SC_PROJECT_DEV" +# value = each.value.name +# protected = false +#} +# +#resource "gitlab_project_variable" "statekey" { +# for_each = local.projects +# +# project = local.gitlab_project_ids[each.value.name] +# key = "PROJECT_TF_STATE" +# value = each.value.name +# protected = false +#} \ No newline at end of file diff --git a/examples/local-simplified-yaml/admin/main.tf b/examples/local-simplified-yaml/admin/main.tf new file mode 100644 index 0000000..102662c --- /dev/null +++ b/examples/local-simplified-yaml/admin/main.tf @@ -0,0 +1,55 @@ +terraform { + required_version = ">= 0.14.0" + + required_providers { + spectrocloud = { + version = "= 0.6.10-pre" + source = "spectrocloud/spectrocloud" + } + + # gitlab = { + # source = "gitlabhq/gitlab" + # version = "3.6.0" + # } + } +} + +variable "sc_host" {} +variable "sc_api_key" { + sensitive = true +} + +provider "spectrocloud" { + host = var.sc_host + api_key = var.sc_api_key + project_name = "" +} + +#variable "gitlab_token" {} +# +#provider "gitlab" { +# token = var.gitlab_token +#} + +locals { + projects = { + for k in fileset("config/project", "project-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/project/${k}")) + } + + profiles = { + for k in fileset("config/profile", "profile-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}")) + } +} + +module "Spectro" { + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + projects = local.projects + profiles = local.profiles +} diff --git a/examples/local-simplified-yaml/admin/terraform.template.tfvars b/examples/local-simplified-yaml/admin/terraform.template.tfvars new file mode 100644 index 0000000..9822bc4 --- /dev/null +++ b/examples/local-simplified-yaml/admin/terraform.template.tfvars @@ -0,0 +1,5 @@ +# Credentials +sc_host = "{enter Spectro Cloud host, blank for SaaS}" +sc_api_key = "{enter Spectro Cloud API Key}" + +#gitlab_token = "{enter Gitlab access token}" \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-bm-200/README.md b/examples/local-simplified-yaml/project-bm-200/README.md new file mode 100644 index 0000000..a81c37d --- /dev/null +++ b/examples/local-simplified-yaml/project-bm-200/README.md @@ -0,0 +1 @@ +Bare metal appliance project \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml b/examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml new file mode 100644 index 0000000..55a96b8 --- /dev/null +++ b/examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml @@ -0,0 +1 @@ +id: "hospital-200" diff --git 
a/examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml b/examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml new file mode 100644 index 0000000..66b389b --- /dev/null +++ b/examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml @@ -0,0 +1,50 @@ +name: hospital-200 +cloudType: libvirt +tags: +- "skip_completion" +profiles: + infra: + name: bm-infra + system: + name: hospital-200-system-profile + addons: + - name: spectro-core +cloud_config: + ssh_key: "ssh-rsa AADS0v57QKjb1o9ubvvATQNg2T3x+inckfzfgX3et1H9X1oSp0FqY1+Mmy5nfTtTyIj5GetePsqa+MgCjnLfCBiOzmZSRgWEre//IipNzMnnZC7El5KJCQn8ksF+DYY9eT9NtNFEMALTZC6hn8BnMc14zqxoJP/GNHftmig8TJC500Uofdr4OKTCRr1JwHS79Cx9LyZdAp/1D8mL6bIMyGOTPVQ8xUpmEYj77m1kdiCHCk22YtLyfUWuQ0SC+2p1soDoNfJUpmxcKboOTZsLq1HDCFrqSyLUWS1PrYZ/MzhsPrsDewB1iHLbYDt87r2odJOpxMO1vNWMOYontODdr5JPKBpCcd/noNyOy/m4Spntytfb/J3kM1oz3dpPfN0xXmC19uR1xHklmbtg1j784IMu7umI2ZCpUwLADAodkbxmbacdkp5I+1NFgrFamvnTjjQAvRexV31m4m9GielKFQ4tCCId2yagMBWRFn5taEhb3SKnRxBcAzaJLopUyErOtqxvSywGvb53v4MEShqBaQSUv4gHfw== spectro2022" + vip: 192.168.100.15 + ntp_servers: ["ntp.ge.com"] +node_groups: + - name: master-pool + control_plane: true + control_plane_as_worker: true + count: 1 + disk_size_gb: 60 + memory_mb: 8192 + cpu: 2 + placements: + - appliance: "hospital-200" + network_type: "bridge" + network_names: "br0" + network: "br" + image_storage_pool: "ehl_images" + target_storage_pool: "ehl_images" + data_storage_pool: "ehl_data" + + - name: worker-pool + count: 3 + disk_size_gb: 60 + memory_mb: 8192 + cpu: 4 + attached_disks: + - size_in_gb: 30 + managed: true + - size_in_gb: 10 + managed: true + placements: + - appliance: "hospital-200" + network_type: "bridge" + network_names: "br0" + network: "br" + image_storage_pool: "ehl_images" + target_storage_pool: "ehl_images" + data_storage_pool: "ehl_data" diff --git a/examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml b/examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml new file mode 100644 index 0000000..89ba3eb --- /dev/null +++ b/examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml @@ -0,0 +1,1152 @@ +name: hospital-200-system-profile +description: system-profile +type: system +cloudType: all +packs: + - name: "pfsense-gateway" + type: manifest + registry: Public Repo + manifests: + - name: pfsense-gateway-config + content: | + --- + apiVersion: v1 + data: + user-data.tmpl: | + #cloud-config + write_files: + - encoding: base64 + content: ${CONFIG_XML} + owner: root:root + path: /cf/conf/config.xml + permissions: '0644' + config-xml-static.tmpl: | + + + 21.5 + + + normal + gateway + edison + + all + + system + 1998 + + + admins + + system + 1999 + 0 + page-all + + + admin + + system + admins + $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2 + 0 + user-shell-access + 2 + 
c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0 + + pfSense.css + + + user + $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu + + sysgwservice + + 2 + + + pfSense.css + 2001 + user-shell-access + + 2000 + 2000 + ${NTP} + + http + + 602232b5962a3 + 2 + 28080 + 2 + pfSense.css + 1e3f75; + + + yes + + + + 400000 + hadp + hadp + hadp + + monthly + + + + enabled + enabled + 25812 + + Etc/UTC + 115200 + serial + + + + + en_US + + + ${DNS_1} + ${DNS_2} + + + + + + vtnet1 + + + + 32 + ${IP_ADDR_WAN} + ${SUBNET_WAN} + WANGW + + + + vtnet0 + + + ${IP_ADDR_LAN} + ${SUBNET_LAN} + + + + + + + + ${DHCP_RANGE_START} + ${DHCP_RANGE_END} + + + + + 94670856 + 189341712 + + + + + + + + + hmac-md5 + + + + allow + gateway.edison + + + + + + + + + + + + + + ::1000 + ::2000 + + assist + medium + + + + + + public + + + + + + 1 + + + + automatic + + + + + + 1626111739 + pass + wan + inet + + + + + + + + + + tcp + + + + + + 25812 + + + + + pass + inet + + lan + 0100000101 + + lan + + + + + + + + + + + + + + + + + + + 1,31 + 0-5 + * + * + * + root + /usr/bin/nice -n20 adjkerntz -a + + + 1 + 3 + 1 + * + * + root + /usr/bin/nice -n20 /etc/rc.update_bogons.sh + + + 1 + 1 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.dyndns.update + + + */60 + * + * + * + * + root + /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot + + + 30 + 12 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.update_urltables + + + 1 + 0 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.update_pkg_metadata + + + */1 + * + * + * + * + root + /usr/sbin/newsyslog + + + 1 + 3 + * + * + * + root + /etc/rc.periodic daily + + + 15 + 4 + * + * + 6 + root + /etc/rc.periodic weekly + + + 30 + 5 + 1 + * + * + root + /etc/rc.periodic monthly + + + + + + + + system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show + 10 + + + + + all + all + + + + + + 602232b5962a3 + + + + transparent + + + + + + + + + 602232b5962a3 + + server + 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + + + + API + api + + https://github.com/jaredhendrickson13/pfsense-api + System + %%PKGVERSION%% + api.xml + jaredhendrickson13@gmail.com + + + + lan + jwt + json + e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2 + 3600 + sha256 + 16 + + + + + + /api/ + + + + + /api/v1/ + + + + + + API + The missing REST API package for pfSense +
System
+ /api/ +
+
+ + + WANGW + + wan + ${IP_GATEWAY_WAN} + WANGW + + inet + + + + + 5 + enabled + + + auto + lan + yes + yes + + + + + + + + + + edison + ! + + + +
+ config-xml-dhcp.tmpl: | + + + 21.5 + + + normal + gateway + edison + + all + + system + 1998 + + + admins + + system + 1999 + 0 + page-all + + + admin + + system + admins + $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2 + 0 + user-shell-access + 2 + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0 + + pfSense.css + + + user + $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu + + sysgwservice + + 2 + + + pfSense.css + 2001 + user-shell-access + + 2000 + 2000 + ${NTP} + + http + + 602232b5962a3 + 2 + 28080 + 2 + pfSense.css + 1e3f75; + + + yes + + + + 400000 + hadp + hadp + hadp + + monthly + + + + enabled + enabled + 25812 + + Etc/UTC + 115200 + serial + + + + + en_US + + + ${DNS_1} + ${DNS_2} + + + + + + vtnet1 + + ${IP_ADDR_WAN} + + + 32 + + + + + + + + SavedCfg + + + + + + + + dhcp6 + + 0 + wan + + + + + vtnet0 + + + ${IP_ADDR_LAN} + ${SUBNET_LAN} + + + + + + + + ${DHCP_RANGE_START} + ${DHCP_RANGE_END} + + + + + 94670856 + 189341712 + + + + + + + + + hmac-md5 + + + + allow + gateway.edison + + + + + + + + + + + + + + ::1000 + ::2000 + + assist + medium + + + + + + public + + + + + + 1 + + + + automatic + + + + + + 1626111739 + pass + wan + inet + + + + + + + + + + tcp + + + + + + 25812 + + + + + pass + inet + + lan + 0100000101 + + lan + + + + + + + + + + + + + + + + + + + 1,31 + 0-5 + * + * + * + root + /usr/bin/nice -n20 adjkerntz -a + + + 1 + 3 + 1 + * + * + root + /usr/bin/nice -n20 /etc/rc.update_bogons.sh + + + 1 + 1 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.dyndns.update + + + */60 + * + * + * + * + root + /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot + + + 30 + 12 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.update_urltables + + + 1 + 0 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.update_pkg_metadata + + + */1 + * + * + * + * + root + /usr/sbin/newsyslog + + + 1 + 3 + * + * + * + root + /etc/rc.periodic daily + + + 15 + 4 + * + * + 6 + root + /etc/rc.periodic weekly + + + 30 + 5 + 1 + * + * + root + /etc/rc.periodic monthly + + + + + + + + system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show + 10 + + + + + all + all + + + + + + 602232b5962a3 + + + + transparent + + + + + + + + + 602232b5962a3 + + server + 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + + + + API + api + + https://github.com/jaredhendrickson13/pfsense-api + System + %%PKGVERSION%% + api.xml + jaredhendrickson13@gmail.com + + + + lan + jwt + json + e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2 + 3600 + sha256 + 16 + + + + + + /api/ + + + + + /api/v1/ + + + + + + API + The missing REST API package for pfSense +
System
+ /api/ +
+
+ + + + 5 + enabled + + + auto + lan + yes + yes + + + + + + + + + + edison + ! + + + +
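+          # How the pieces fit together: this ConfigMap is mounted at /var/files by the
+          # Configuration resource below; its embedded Terraform renders one of the templates
+          # (config-xml-dhcp.tmpl when IP_ADDR_WAN is "dhcp", config-xml-static.tmpl otherwise)
+          # and injects the result into the pfSense VM as /cf/conf/config.xml through the
+          # cloud-init write_files entry in user-data.tmpl.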
+ kind: ConfigMap + metadata: + name: pfsense-vm-files + --- + apiVersion: v1 + data: + credentials: VVJJX1RFTVBMQVRFOiAicWVtdStzc2g6Ly8lc0Alcy9zeXN0ZW0/a2V5ZmlsZT0lcyZzb2NrZXQ9JXMma25vd25faG9zdHNfdmVyaWZ5PWlnbm9yZSI= + kind: Secret + metadata: + name: libvirt-account-creds + type: Opaque + --- + apiVersion: terraform.core.oam.dev/v1beta1 + kind: Provider + metadata: + name: libvirt + spec: + provider: custom + credentials: + source: Secret + secretRef: + name: libvirt-account-creds + key: credentials + --- + apiVersion: terraform.core.oam.dev/v1beta1 + kind: Configuration + metadata: + name: pfsense-gateway-vm-config + spec: + volumeSpec: + volumeMounts: + - name: libvirt + mountPath: /var/run/libvirt/libvirt-sock + - name: spectro-directory + mountPath: /opt/spectrocloud + - mountPath: "/var/files" + name: files-vol + volumes: + - name: files-vol + configMap: + name: pfsense-vm-files + - name: libvirt + hostPath: + path: /var/run/libvirt/libvirt-sock + type: Socket + - name: spectro-directory + hostPath: + path: /opt/spectrocloud + type: Directory + deleteResource: true + variable: + VM_NAME: "pfsense-gateway-vm" + NTP: FROM_SECRET_REF + DNS_1: FROM_SECRET_REF + DNS_2: FROM_SECRET_REF + IP_ADDR_WAN: FROM_SECRET_REF + IP_GATEWAY_WAN: FROM_SECRET_REF + SUBNET_WAN: FROM_SECRET_REF + IP_ADDR_LAN: FROM_SECRET_REF + SUBNET_LAN: FROM_SECRET_REF + DHCP_RANGE_START: FROM_SECRET_REF + DHCP_RANGE_END: FROM_SECRET_REF + providerRef: + name: libvirt + hcl: | + terraform { + required_version = ">= 0.13" + required_providers { + libvirt = { + source = "dmacvicar/libvirt" + version = "0.6.14" + } + } + } + + locals { + config_file = try(var.IP_ADDR_WAN == "dhcp", false) ? "/var/files/config-xml-dhcp.tmpl" : "/var/files/config-xml-static.tmpl" + + dhcp_param_map = { + NTP = var.NTP + DNS_1 = var.DNS_1 + DNS_2 = var.DNS_2 + IP_ADDR_WAN = var.IP_ADDR_WAN + IP_ADDR_LAN = var.IP_ADDR_LAN + SUBNET_LAN = var.SUBNET_LAN + DHCP_RANGE_START = var.DHCP_RANGE_START + DHCP_RANGE_END = var.DHCP_RANGE_END + } + + static_param_map = { + NTP = var.NTP + DNS_1 = var.DNS_1 + DNS_2 = var.DNS_2 + IP_ADDR_WAN = var.IP_ADDR_WAN + SUBNET_WAN = var.SUBNET_WAN + IP_GATEWAY_WAN = var.IP_GATEWAY_WAN + IP_ADDR_LAN = var.IP_ADDR_LAN + SUBNET_LAN = var.SUBNET_LAN + DHCP_RANGE_START = var.DHCP_RANGE_START + DHCP_RANGE_END = var.DHCP_RANGE_END + } + } + + ##### VARIABLES ##### + variable "URI_TEMPLATE" { + type = string + default = "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore" + } + + variable "NTP" { + type = string + default = "ntp.ge.com ntp1.ge.com" + } + + variable "DNS_1" { + type = string + default = "10.220.220.220" + } + + variable "DNS_2" { + type = string + default = "10.220.220.221" + } + + variable "IP_ADDR_WAN" { + type = string + default = "dhcp" + } + + variable "SUBNET_WAN" { + type = string + default = "23" + } + + variable "IP_GATEWAY_WAN" { + type = string + default = "" + } + + variable "IP_ADDR_LAN" { + type = string + default = "192.168.100.1" + } + + variable "SUBNET_LAN" { + type = string + default = "24" + } + + variable "DHCP_RANGE_START" { + type = string + default = "192.168.100.50" + } + + variable "DHCP_RANGE_END" { + type = string + default = "192.168.100.250" + } + + variable "LIBVIRT_SOCKET" { + type = string + default = "/var/run/libvirt/libvirt-sock" + } + + variable "VM_NAME" { + type = string + default = "pfsense-terraform" + } + + ##### OUTPUT ##### + output "config_value" { + value = data.template_file.config.rendered + } + + output "config_file" { + value = 
local.config_file + } + + output "config_params" { + value = try(var.IP_ADDR_WAN == "dhcp", false) ? local.dhcp_param_map : local.static_param_map + } + + ##### PROVIDER ##### + provider "libvirt" { + uri = "qemu:///system" + #uri = format(var.URI_TEMPLATE, var.SSH_USER, var.HOST_IP, var.SSH_KEY, var.LIBVIRT_SOCKET) + } + + data "template_file" "config" { + template = file(local.config_file) + vars = try(var.IP_ADDR_WAN == "dhcp", false) ? local.dhcp_param_map : local.static_param_map + } + + data "template_file" "user_data" { + template = file("/var/files/user-data.tmpl") + vars = { + CONFIG_XML = base64encode(data.template_file.config.rendered) + } + } + + resource "libvirt_pool" "ubuntu" { + name = "ubuntuop" + type = "dir" + path = "/var/lib/libvirt/terraform-provider-libvirt-pool-ubuntuoperator" + } + + resource "libvirt_volume" "ubuntu-qcow2" { + name = "ubuntu-qcow2" + pool = libvirt_pool.ubuntu.name + source = "/opt/spectrocloud/vm-operator/ehl-system-gateway.qcow2" + format = "qcow2" + } + + resource "libvirt_cloudinit_disk" "commoninit" { + name = "commoninit.iso" + user_data = data.template_file.user_data.rendered + pool = libvirt_pool.ubuntu.name + } + + resource "libvirt_domain" "domain-ubuntu" { + name = var.VM_NAME + memory = "2048" + vcpu = 1 + + cloudinit = libvirt_cloudinit_disk.commoninit.id + + network_interface { + bridge = "br0" + } + + network_interface { + bridge = "br1" + } + + console { + type = "pty" + target_port = "0" + target_type = "serial" + } + + disk { + volume_id = libvirt_volume.ubuntu-qcow2.id + } + + graphics { + type = "vnc" + listen_type = "address" + listen_address = "0.0.0.0" + autoport = true + } + } + diff --git a/examples/local-simplified-yaml/project-bm-200/main.tf b/examples/local-simplified-yaml/project-bm-200/main.tf new file mode 100644 index 0000000..3a7298c --- /dev/null +++ b/examples/local-simplified-yaml/project-bm-200/main.tf @@ -0,0 +1,71 @@ +terraform { + required_version = ">= 0.14.0" + + required_providers { + spectrocloud = { + version = "= 0.6.10-pre" + source = "spectrocloud/spectrocloud" + } + } +} + +variable "sc_host" {} +variable "sc_api_key" { + sensitive = true +} +variable "sc_project_name" {} + +provider "spectrocloud" { + host = var.sc_host + api_key = var.sc_api_key + project_name = var.sc_project_name +} + +locals { + profiles = { + for k in fileset("config/profile", "profile-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}")) + } + + appliances = { + for k in fileset("config/appliance", "appliance-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}")) + } + + clusters = { + for k in fileset("config/cluster", "cluster-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}")) + } +} + +module "SpectroSystemProfile" { + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + profiles = local.profiles +} + +module "SpectroAppliance" { + depends_on = [module.SpectroSystemProfile] + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + appliances = local.appliances +} + +module "SpectroCluster" { + depends_on = [module.SpectroAppliance] + source = "github.com/spectrocloud/terraform-spectrocloud-modules" 
+ + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + clusters = local.clusters +} diff --git a/examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars b/examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars new file mode 100644 index 0000000..3c4423e --- /dev/null +++ b/examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars @@ -0,0 +1,4 @@ +# Credentials +sc_host = "{enter Spectro Cloud host, blank for SaaS}" +sc_api_key = "{enter Spectro Cloud API Key}" +sc_project_name = "{enter Spectro Cloud Project Name}" \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md b/examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md new file mode 100644 index 0000000..73757e1 --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md @@ -0,0 +1 @@ +Vsphere with creds appliance project \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml new file mode 100644 index 0000000..515e47e --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml @@ -0,0 +1 @@ +id: "hospital-201" diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml new file mode 100644 index 0000000..32b51de --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml @@ -0,0 +1,44 @@ +name: hospital-201 +cloudType: edge-vsphere +edge_host_uid: hospital-201 +tags: +- "skip_completion" +profiles: + infra: + name: vsphere-with-creds-infra + system: + name: hospital-201-system-profile + addons: + - name: spectro-core +cloud_config: + ssh_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCr3hE9IS5UUDPqNOiEWVJvVDS0v57QKjb1o9ubvvATQNg2T3x+inckfzfgX3et1H9X1oSp0FqY1+Mmy5nfTtTyIj5Get1cntcC4QqdZV8Op1tmpI01hYMj4lLn55WNaXgDt+35tJ47kWRr5RqTGV05MPNWN3klaVsePsqa+MgCjnLfCBiOz1tpBOgxqPNqtQPXh+/T/Ul6ZDUW/rySr9iNR9uGd04tYzD7wdTdvmZSRgWEre//IipNzMnnZC7El5KJCQn8ksF+DYY9eT9NtNFEMALTZC6hn8BnMc14zqxoJP/GNHftmig8TJC500Uofdr4OKTCRr1JwHS79Cx9LyZdAp/1D8mL6bIMyGOTPVQ8xUpmEYj77m1kdiCHCk22YtLyfUWuQ0SC+2p1soDoNfJUpmxcKboOTZsLq1HDCFrqSyLUWS1PrYZ/MzhsPrsDewB1iHLbYDt87r2odJOpxMO1vNWMOYontODdr5JPKBpCcd/noNyOy/m4Spntytfb/J3kM1oz3dpPfN0xXmC19uR1xHklmbtg1j784IMu7umI2ZCpUwLADAodkbxmbacdkp5I+1NFgrFamvnTjjQAvRexV31m4m9GielKFQ4tCCId2yagMBWRFn5taEhb3SKnRxBcAzaJLopUyErOtqxvSywGvb53v4MEShqBaQSUv4gHfw== spectro2022" + vip: 192.168.100.15 + static_ip: false + network_type: VIP + datacenter: "Spectrocloud" + folder: "demo-creds-1" +node_groups: + - name: master-pool + control_plane: true + control_plane_as_worker: true + count: 1 + disk_size_gb: 60 + memory_mb: 8192 + cpu: 2 + placement: + cluster: "Spectrocloud" + resource_pool: "Resources" + datastore: "VOL_SC_CMP1_02" + network: "Spectro-Pvt-Net05" + + - name: worker-pool + count: 3 + disk_size_gb: 60 + memory_mb: 8192 + cpu: 4 + placement: + cluster: "Spectrocloud" + resource_pool: "Resources" + datastore: "VOL_SC_CMP1_02" + network: "Spectro-Pvt-Net05" + diff --git 
a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml new file mode 100644 index 0000000..34c256b --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml @@ -0,0 +1,751 @@ +name: hospital-201-system-profile +description: system-profile +type: system +cloudType: all +packs: + - name: "pfsense-gateway" + type: manifest + registry: Public Repo + manifests: + - name: pfsense-config + content: | + apiVersion: v1 + data: + user-data.tmpl: | + #cloud-config + write_files: + - encoding: base64 + content: ${CONFIG_XML} + owner: root:wheel + path: /cf/conf/config.xml + permissions: '0644' + config-xml.tmpl: | + + + 21.5 + + + normal + gateway + edison + + all + + system + 1998 + + + admins + + system + 1999 + 0 + page-all + + + admin + + system + admins + $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2 + 0 + user-shell-access + 2 + c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0 + + pfSense.css + + + user + $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu + + sysgwservice + + 2 + + + pfSense.css + 2001 + user-shell-access + + 2000 + 2000 + ${NTP} + + http + + 602232b5962a3 + 2 + 28080 + 2 + pfSense.css + 1e3f75; + + + yes + + + + 400000 + hadp + hadp + hadp + + monthly + + + + enabled + enabled + 25812 + + Etc/UTC + 115200 + serial + + + + + en_US + + + ${DNS_1} + ${DNS_2} + + + + + + vmx1 + + + + 32 + ${IP_ADDR_WAN} + ${SUBNET_WAN} + WANGW + + + + vmx0 + + + ${IP_ADDR_LAN} + ${SUBNET_LAN} + + + + + + + + ${DHCP_RANGE_START} + ${DHCP_RANGE_END} + + + + + 94670856 + 189341712 + + + + + + + + + hmac-md5 + + + + allow + gateway.edison + + + + + + + + + + + + + + ::1000 + ::2000 + + assist + medium + + + + + + public + + + + + + 1 + + + + automatic + + + + + + 1626111739 + pass + wan + inet + + + + + + + + + + tcp + + + + + + 25812 + + + + + pass + inet + + lan + 0100000101 + + lan + + + + + + + + + + + + + + + + + + + 1,31 + 0-5 + * + * + * + root + /usr/bin/nice -n20 adjkerntz -a + + + 1 + 3 + 1 + * + * + root + /usr/bin/nice -n20 /etc/rc.update_bogons.sh + + + 1 + 1 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.dyndns.update + + + */60 + * + * + * + * + root + /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot + + + 30 + 12 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.update_urltables + + + 1 + 0 + * + * + * + root + /usr/bin/nice -n20 /etc/rc.update_pkg_metadata + + + */1 + * + * + * + * + root + /usr/sbin/newsyslog + + + 1 + 3 + * + * + * + root + /etc/rc.periodic daily + + + 15 + 4 + * + * + 6 + root + /etc/rc.periodic weekly + + + 30 + 5 + 1 + * + * + root + /etc/rc.periodic monthly + + + + + + + + 
system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show + 10 + + + + + all + all + + + + + + 602232b5962a3 + + + + transparent + + + + + + + + + 602232b5962a3 + + server + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + + + + API + api + + https://github.com/jaredhendrickson13/pfsense-api + System + %%PKGVERSION%% + api.xml + jaredhendrickson13@gmail.com + + + + lan + jwt + json + e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2 + 3600 + sha256 + 16 + + + + + + /api/ + + + + + /api/v1/ + + + + + + API + The missing REST API package for pfSense +
System + /api/ + +
+ + + WANGW + + wan + ${IP_GATEWAY_WAN} + WANGW + + inet + + + + + 5 + enabled + + + auto + lan + yes + yes + + + + + + + + + + edison + ! + + + +
+ kind: ConfigMap + metadata: + name: pfsense-vm-files + + --- + apiVersion: v1 + data: + credentials: Q1JFRFM6IEZST01fU0VDUkVUX1JFRg== + kind: Secret + metadata: + name: account-creds + type: Opaque + --- + apiVersion: terraform.core.oam.dev/v1beta1 + kind: Provider + metadata: + name: vsphere-custom + spec: + provider: custom + credentials: + source: Secret + secretRef: + name: account-creds + key: credentials + --- + apiVersion: terraform.core.oam.dev/v1beta1 + kind: Configuration + metadata: + name: pfsense-gateway-vm-config + namespace: jet-system + spec: + providerRef: + name: vsphere-custom + namespace: jet-system + volumeSpec: + volumeMounts: + - name: spectro-directory + mountPath: /opt/spectrocloud + - mountPath: "/var/files" + name: files-vol + volumes: + - name: files-vol + configMap: + name: pfsense-vm-files + - name: spectro-directory + hostPath: + path: /opt/spectrocloud + type: Directory + deleteResource: true + variableRef: + - name: TF_VAR_VSPHERE_HOST + valueFrom: + secretKeyRef: + key: vcenterServer + name: vsphere-cloud-account + - name: TF_VAR_USERNAME + valueFrom: + secretKeyRef: + key: username + name: vsphere-cloud-account + - name: TF_VAR_PASSWORD + valueFrom: + secretKeyRef: + key: password + name: vsphere-cloud-account + - name: TF_VAR_INSECURE + valueFrom: + secretKeyRef: + key: insecure + name: vsphere-cloud-account + variable: + VM_NAME: "pfsense-gateway-vm" + NTP: FROM_SECRET_REF + DNS_1: FROM_SECRET_REF + DNS_2: FROM_SECRET_REF + IP_ADDR_WAN: FROM_SECRET_REF + IP_GATEWAY_WAN: FROM_SECRET_REF + SUBNET_WAN: FROM_SECRET_REF + IP_ADDR_LAN: FROM_SECRET_REF + SUBNET_LAN: FROM_SECRET_REF + DHCP_RANGE_START: FROM_SECRET_REF + DHCP_RANGE_END: FROM_SECRET_REF + DATACENTER: FROM_SECRET_REF + DATASTORE: FROM_SECRET_REF + RESOURCE_POOL: FROM_SECRET_REF + WAN_NETWORK: FROM_SECRET_REF + LAN_NETWORK: FROM_SECRET_REF + VM_TEMPLATE_NAME: FROM_SECRET_REF + FOLDER: FROM_SECRET_REF + hcl: | + provider "vsphere" { + user = var.USERNAME + password = var.PASSWORD + vsphere_server = var.VSPHERE_HOST + + allow_unverified_ssl = tobool(var.INSECURE) + } + + variable "USERNAME" { + type = string + } + + variable "PASSWORD" { + type = string + } + + variable "VSPHERE_HOST" { + type = string + } + + variable "INSECURE" { + type = string + default = "true" + } + + variable "NTP" { + type = string + } + + variable "DNS_1" { + type = string + } + + variable "DNS_2" { + type = string + } + + variable "IP_ADDR_WAN" { + type = string + } + + variable "SUBNET_WAN" { + type = string + } + + variable "IP_GATEWAY_WAN" { + type = string + } + + variable "IP_ADDR_LAN" { + type = string + } + + variable "SUBNET_LAN" { + type = string + } + + variable "DHCP_RANGE_START" { + type = string + } + + variable "DHCP_RANGE_END" { + type = string + } + + variable "DATACENTER" { + type = string + } + + variable "DATASTORE" { + type = string + } + + variable "RESOURCE_POOL" { + type = string + } + + variable "WAN_NETWORK" { + type = string + } + + variable "LAN_NETWORK" { + type = string + } + + variable "VM_TEMPLATE_NAME" { + type = string + } + + variable "VM_NAME" { + type = string + } + + variable "FOLDER" { + type = string + } + + ##### OUTPUT ##### + output "config_value" { + value = data.template_file.config.rendered + } + + output "user_data_value" { + value = data.template_file.user_data.rendered + } + + ##### PROVIDER ##### + data "template_file" "config" { + template = file("/var/files/config-xml.tmpl") + vars = { + NTP = var.NTP + DNS_1 = var.DNS_1 + DNS_2 = var.DNS_2 + IP_ADDR_WAN = var.IP_ADDR_WAN 
+ SUBNET_WAN = var.SUBNET_WAN + IP_GATEWAY_WAN = var.IP_GATEWAY_WAN + IP_ADDR_LAN = var.IP_ADDR_LAN + SUBNET_LAN = var.SUBNET_LAN + DHCP_RANGE_START = var.DHCP_RANGE_START + DHCP_RANGE_END = var.DHCP_RANGE_END + } + } + + data "template_file" "user_data" { + template = file("/var/files/user-data.tmpl") + vars = { + CONFIG_XML = base64encode(data.template_file.config.rendered) + } + } + + data "vsphere_datacenter" "dc" { + name = var.DATACENTER + } + + data "vsphere_datastore" "datastore" { + name = var.DATASTORE + datacenter_id = data.vsphere_datacenter.dc.id + } + + data "vsphere_resource_pool" "pool" { + name = var.RESOURCE_POOL + datacenter_id = data.vsphere_datacenter.dc.id + } + + data "vsphere_network" "wan_network" { + name = var.WAN_NETWORK + datacenter_id = data.vsphere_datacenter.dc.id + } + + data "vsphere_network" "lan_network" { + name = var.LAN_NETWORK + datacenter_id = data.vsphere_datacenter.dc.id + } + + data "vsphere_virtual_machine" "template" { + name = var.VM_TEMPLATE_NAME + datacenter_id = data.vsphere_datacenter.dc.id + } + + resource "vsphere_virtual_machine" "vm" { + name = var.VM_NAME + resource_pool_id = data.vsphere_resource_pool.pool.id + datastore_id = data.vsphere_datastore.datastore.id + folder = var.FOLDER + + wait_for_guest_net_timeout = 0 + + num_cpus = 2 + memory = 4096 + guest_id = "freebsd12_64Guest" + scsi_type = "lsilogic" + + network_interface { + network_id = data.vsphere_network.lan_network.id + } + + network_interface { + network_id = data.vsphere_network.wan_network.id + } + + cdrom { + client_device = true + } + + disk { + label = var.VM_TEMPLATE_NAME + size = data.vsphere_virtual_machine.template.disks.0.size + eagerly_scrub = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub + thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned + } + + clone { + template_uuid = data.vsphere_virtual_machine.template.id + } + + extra_config = { + "guestinfo.userdata" = base64encode(data.template_file.user_data.rendered) + "guestinfo.userdata.encoding" = "base64" + "guestinfo.metadata" = <<-EOT + { + "instance-id": ${var.VM_NAME} + } + EOT + } + } diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf b/examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf new file mode 100644 index 0000000..073226b --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf @@ -0,0 +1,72 @@ +terraform { + required_version = ">= 0.14.0" + + required_providers { + spectrocloud = { + version = "= 0.6.10-pre" + source = "spectrocloud/spectrocloud" + } + } +} + +variable "sc_host" {} +variable "sc_api_key" { + sensitive = true +} +variable "sc_project_name" {} + +provider "spectrocloud" { + host = var.sc_host + api_key = var.sc_api_key + project_name = var.sc_project_name +} + +locals { + profiles = { + for k in fileset("config/profile", "profile-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}")) + } + + appliances = { + for k in fileset("config/appliance", "appliance-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}")) + } + + clusters = { + for k in fileset("config/cluster", "cluster-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}")) + } +} + +module "SpectroSystemProfile" { + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + 
#version = "0.0.7" + + profiles = local.profiles +} + +module "SpectroAppliance" { + depends_on = [module.SpectroSystemProfile] + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + appliances = local.appliances +} + +module "SpectroCluster" { + depends_on = [module.SpectroAppliance] + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + //source = "/Users/rishi/work/git_clones/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + clusters = local.clusters +} diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars b/examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars new file mode 100644 index 0000000..3c4423e --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars @@ -0,0 +1,4 @@ +# Credentials +sc_host = "{enter Spectro Cloud host, blank for SaaS}" +sc_api_key = "{enter Spectro Cloud API Key}" +sc_project_name = "{enter Spectro Cloud Project Name}" \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md b/examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md new file mode 100644 index 0000000..8502089 --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md @@ -0,0 +1 @@ +Vsphere without creds appliance project \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml new file mode 100644 index 0000000..9627e8e --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml @@ -0,0 +1 @@ +id: "hospital-202" diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml new file mode 100644 index 0000000..b1d843a --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml @@ -0,0 +1,26 @@ +name: hospital-202 +tags: +- "skip_completion" +cloudType: edge +profiles: + infra: + name: without-creds-infra + system: + name: hospital-202-system-profile + addons: + - name: spectro-core +cloud_config: + ssh_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCr3hE9IS5UUDPqNOiEWVJvVDS0v57QKjb1o9ubvvATQNg2T3x+inckfzfgX3et1H9X1oSp0FqY1+Mmy5nfTtTyIj5Get1cntcC4QqdZV8Op1tmpI01hYMj4lLn55WNaXgDt+35tJ47kWRr5RqTGV05MPNWN3klaVsePsqa+MgCjnLfCBiOz1tpBOgxqPNqtQPXh+/T/Ul6ZDUW/rySr9iNR9uGd04tYzD7wdTdvmZSRgWEre//IipNzMnnZC7El5KJCQn8ksF+DYY9eT9NtNFEMALTZC6hn8BnMc14zqxoJP/GNHftmig8TJC500Uofdr4OKTCRr1JwHS79Cx9LyZdAp/1D8mL6bIMyGOTPVQ8xUpmEYj77m1kdiCHCk22YtLyfUWuQ0SC+2p1soDoNfJUpmxcKboOTZsLq1HDCFrqSyLUWS1PrYZ/MzhsPrsDewB1iHLbYDt87r2odJOpxMO1vNWMOYontODdr5JPKBpCcd/noNyOy/m4Spntytfb/J3kM1oz3dpPfN0xXmC19uR1xHklmbtg1j784IMu7umI2ZCpUwLADAodkbxmbacdkp5I+1NFgrFamvnTjjQAvRexV31m4m9GielKFQ4tCCId2yagMBWRFn5taEhb3SKnRxBcAzaJLopUyErOtqxvSywGvb53v4MEShqBaQSUv4gHfw== 
spectro2022" +node_groups: + - name: master-pool + control_plane: true + count: 1 + placements: + - appliance: "hospital-202" + + - name: worker-pool + count: 3 + placements: + - appliance: "hospital-202" + + diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml new file mode 100644 index 0000000..985a8fa --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml @@ -0,0 +1,321 @@ +name: hospital-202-system-profile +description: system-profile +type: system +cloudType: all +packs: + - name: "fluentbit" + registry: Public Repo + type: "spectro" + layer: "addon" + version: "1.3.5" + values: | + pack: + #The namespace (on the target cluster) to install this chart + #When not found, a new namespace will be created + namespace: "fluent-bit" + content: + images: + - image: docker.io/fluent/fluent-bit:1.3.5 + + charts: + fluent-bit: + + fullnameOverride: "fluent-bit" + nameOverride: "" + on_minikube: false + + image: + fluent_bit: + repository: fluent/fluent-bit + tag: 1.3.5 + pullPolicy: Always + + # When enabled, exposes json and prometheus metrics on {{ .Release.Name }}-metrics service + metrics: + enabled: false + service: + # labels: + # key: value + annotations: {} + # In order for Prometheus to consume metrics automatically use the following annotations: + # prometheus.io/path: "/api/v1/metrics/prometheus" + # prometheus.io/port: "2020" + # prometheus.io/scrape: "true" + port: 2020 + type: ClusterIP + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + + # When enabled, fluent-bit will keep track of tailing offsets across pod restarts. + trackOffsets: false + + ## PriorityClassName + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + priorityClassName: "" + + backend: + type: es + forward: + host: fluentd + port: 24284 + tls: "off" + tls_verify: "on" + tls_debug: 1 + shared_key: + es: + host: elasticsearch-client + port: 9200 + # Elastic Index Name + index: fluent-bit + type: flb_type + logstash_prefix: fluent-bit + replace_dots: "On" + logstash_format: "On" + retry_limit: "False" + time_key: "@timestamp" + # Optional username credential for Elastic X-Pack access + http_user: + # Password for user defined in HTTP_User + http_passwd: + # Optional TLS encryption to ElasticSearch instance + tls: "off" + tls_verify: "on" + # TLS certificate for the Elastic (in PEM format). Use if tls=on and tls_verify=on. + tls_ca: "" + # TLS debugging levels = 1-4 + tls_debug: 1 + splunk: + host: 127.0.0.1 + port: 8088 + token: "" + send_raw: "on" + tls: "on" + tls_verify: "off" + tls_debug: 1 + message_key: "kubernetes" + stackdriver: {} + + ## + ## Ref: http://fluentbit.io/documentation/current/output/http.html + ## + http: + host: 127.0.0.1 + port: 80 + uri: "/" + http_user: + http_passwd: + tls: "off" + tls_verify: "on" + tls_debug: 1 + ## Specify the data format to be used in the HTTP request body + ## Can be either 'msgpack' or 'json' + format: msgpack + headers: [] + + parsers: + enabled: false + ## List the respective parsers in key: value format per entry + ## Regex required fields are name and regex. JSON and Logfmt required field + ## is name. + regex: [] + logfmt: [] + ## json parser config can be defined by providing an extraEntries field. 
+ ## The following entry: + ## json: + ## - extraEntries: | + ## Decode_Field_As escaped log do_next + ## Decode_Field_As json log + ## + ## translates into + ## + ## Command | Decoder | Field | Optional Action | + ## ==============|===========|=======|===================| + ## Decode_Field_As escaped log do_next + ## Decode_Field_As json log + ## + json: [] + + env: [] + + ## Annotations to add to the DaemonSet's Pods + podAnnotations: {} + + ## By default there different 'files' provides in the config + ## (fluent-bit.conf, custom_parsers.conf). This defeats + ## changing a configmap (since it uses subPath). If this + ## variable is set, the user is assumed to have provided, + ## in 'existingConfigMap' the entire config (etc/*) of fluent-bit, + ## parsers and system config. In this case, no subPath is + ## used + fullConfigMap: false + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.existingConfigMap}} + ## Defining existingConfigMap will cause templates/config.yaml + ## to NOT generate a ConfigMap resource + ## + existingConfigMap: "" + + + # NOTE If you want to add extra sections, add them here, inbetween the includes, + # wherever they need to go. Sections order matters. + + rawConfig: |- + @INCLUDE fluent-bit-service.conf + @INCLUDE fluent-bit-input.conf + @INCLUDE fluent-bit-filter.conf + @INCLUDE fluent-bit-output.conf + + + # WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # This is to add extra entries to an existing section, NOT for adding new sections + # Do not submit bugs against indent being wrong. Add your new sections to rawConfig + # instead. + # + extraEntries: + input: |- + # # >=1 additional Key/Value entrie(s) for existing Input section + audit: |- + # # >=1 additional Key/Value entrie(s) for existing Input section + filter: |- + # # >=1 additional Key/Value entrie(s) for existing Filter section + output: |- + # # >=1 additional Key/Value entrie(s) for existing Ouput section + # WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + + ## Extra ports to add to the daemonset ports section + extraPorts: [] + + ## Extra volumes containing additional files required for fluent-bit to work + ## (eg. CA certificates) + ## Ref: https://kubernetes.io/docs/concepts/storage/volumes/ + ## + extraVolumes: [] + + ## Extra volume mounts for the fluent-bit pod. + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/ + ## + extraVolumeMounts: [] + + resources: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 10m + memory: 8Mi + + # When enabled, pods will bind to the node's network namespace. + hostNetwork: false + + # Which DNS policy to use for the pod. + # Consider switching to 'ClusterFirstWithHostNet' when 'hostNetwork' is enabled. 
+ dnsPolicy: ClusterFirst + + ## Node tolerations for fluent-bit scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + # Fluent bit is configured to run on all master and worker nodes + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + + ## Node labels for fluent-bit pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + affinity: {} + + service: + flush: 1 + logLevel: info + + input: + tail: + memBufLimit: 5MB + parser: docker + path: /var/log/containers/*.log + ignore_older: "" + systemd: + enabled: false + filters: + systemdUnit: + - docker.service + - kubelet.service + - node-problem-detector.service + maxEntries: 1000 + readFromTail: true + stripUnderscores: false + tag: host.* + + audit: + enable: false + input: + memBufLimit: 35MB + parser: docker + path: /var/log/kube-apiserver-audit.log + bufferChunkSize: 2MB + bufferMaxSize: 10MB + skipLongLines: On + key: kubernetes-audit + + filter: + kubeURL: https://kubernetes.default.svc:443 + kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kubeTag: kube + kubeTagPrefix: kube.var.log.containers. + + # If true, check to see if the log field content is a JSON string map, if so, + # it append the map fields as part of the log structure. + mergeJSONLog: true + + # If set, all unpacked keys from mergeJSONLog (Merge_Log) will be packed under + # the key name specified on mergeLogKey (Merge_Log_Key) + mergeLogKey: "" + + # If true, enable the use of monitoring for a pod annotation of + # fluentbit.io/parser: parser_name. parser_name must be the name + # of a parser contained within parsers.conf + enableParser: true + + # If true, enable the use of monitoring for a pod annotation of + # fluentbit.io/exclude: true. If present, discard logs from that pod. + enableExclude: true + + rbac: + # Specifies whether RBAC resources should be created + create: true + # Specifies whether a PodSecurityPolicy should be created + pspEnabled: false + + taildb: + directory: /var/lib/fluent-bit + + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + + ## Specifies security settings for a container + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + securityContext: {} + # securityContext: + # privileged: true + + ## Specifies security settings for a pod + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + podSecurityContext: {} + # podSecurityContext: + # runAsUser: 1000 \ No newline at end of file diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf b/examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf new file mode 100644 index 0000000..073226b --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf @@ -0,0 +1,72 @@ +terraform { + required_version = ">= 0.14.0" + + required_providers { + spectrocloud = { + version = "= 0.6.10-pre" + source = "spectrocloud/spectrocloud" + } + } +} + +variable "sc_host" {} +variable "sc_api_key" { + sensitive = true +} +variable "sc_project_name" {} + +provider "spectrocloud" { + host = var.sc_host + api_key = var.sc_api_key + project_name = var.sc_project_name +} + +locals { + profiles = { + for k in fileset("config/profile", "profile-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}")) + } + + appliances = { + for k in fileset("config/appliance", "appliance-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}")) + } + + clusters = { + for k in fileset("config/cluster", "cluster-*.yaml") : + trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}")) + } +} + +module "SpectroSystemProfile" { + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + profiles = local.profiles +} + +module "SpectroAppliance" { + depends_on = [module.SpectroSystemProfile] + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + appliances = local.appliances +} + +module "SpectroCluster" { + depends_on = [module.SpectroAppliance] + source = "github.com/spectrocloud/terraform-spectrocloud-modules" + //source = "/Users/rishi/work/git_clones/terraform-spectrocloud-modules" + + # It is recommended to use latest version of module instead of using latest from github + #source = "spectrocloud/modules/spectrocloud" + #version = "0.0.7" + + clusters = local.clusters +} diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars b/examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars new file mode 100644 index 0000000..3c4423e --- /dev/null +++ b/examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars @@ -0,0 +1,4 @@ +# Credentials +sc_host = "{enter Spectro Cloud host, blank for SaaS}" +sc_api_key = "{enter Spectro Cloud API Key}" +sc_project_name = "{enter Spectro Cloud Project Name}" \ No newline at end of file diff --git a/main.tf b/main.tf index 29f5a37..1b64ad7 100644 --- a/main.tf +++ b/main.tf @@ -1,7 +1,7 @@ terraform { required_providers { spectrocloud = { - version = "=0.6.10-pre" + 
version = "~> 0.6.12" source = "spectrocloud/spectrocloud" } } diff --git a/spectro-cluster-edge-vsphere.tf b/spectro-cluster-edge-vsphere.tf index 6cb5e84..c274d61 100644 --- a/spectro-cluster-edge-vsphere.tf +++ b/spectro-cluster-edge-vsphere.tf @@ -13,6 +13,40 @@ resource "spectrocloud_cluster_edge_vsphere" "this" { folder = each.value.cloud_config.folder } + dynamic "cluster_rbac_binding" { + for_each = try(each.value.cluster_rbac_binding, []) + content { + type = cluster_rbac_binding.value.type + + role = { + kind = cluster_rbac_binding.value.role.kind + name = cluster_rbac_binding.value.role.name + } + + dynamic "subjects" { + for_each = try(cluster_rbac_binding.value.subjects, []) + + content { + type = subjects.value.type + name = subjects.value.name + namespace = try(subjects.value.namespace, "") + } + } + } + } + + dynamic "namespaces" { + for_each = try(each.value.namespaces, []) + + content { + name = namespaces.value.name + resource_allocation = { + cpu_cores = try(namespaces.value.resource_allocation.cpu_cores, "") + memory_MiB = try(namespaces.value.resource_allocation.memory_MiB, "") + } + } + } + #infra profile cluster_profile { id = local.profile_map[each.value.profiles.infra.name].id @@ -98,6 +132,18 @@ resource "spectrocloud_cluster_edge_vsphere" "this" { control_plane_as_worker = try(machine_pool.value.control_plane_as_worker, false) count = machine_pool.value.count + additional_labels = try(machine_pool.value.additional_labels, tomap({})) + + dynamic "taints" { + for_each = try(machine_pool.value.taints, []) + + content { + key = taints.value.key + value = taints.value.value + effect = taints.value.effect + } + } + placement { cluster = machine_pool.value.placement.cluster resource_pool = machine_pool.value.placement.resource_pool diff --git a/spectro-cluster-edge.tf b/spectro-cluster-edge.tf index 6be4a85..f48429c 100644 --- a/spectro-cluster-edge.tf +++ b/spectro-cluster-edge.tf @@ -7,6 +7,40 @@ resource "spectrocloud_cluster_edge" "this" { ssh_key = try(each.value.cloud_config.ssh_key, "") } + dynamic "cluster_rbac_binding" { + for_each = try(each.value.cluster_rbac_binding, []) + content { + type = cluster_rbac_binding.value.type + + role = { + kind = cluster_rbac_binding.value.role.kind + name = cluster_rbac_binding.value.role.name + } + + dynamic "subjects" { + for_each = try(cluster_rbac_binding.value.subjects, []) + + content { + type = subjects.value.type + name = subjects.value.name + namespace = try(subjects.value.namespace, "") + } + } + } + } + + dynamic "namespaces" { + for_each = try(each.value.namespaces, []) + + content { + name = namespaces.value.name + resource_allocation = { + cpu_cores = try(namespaces.value.resource_allocation.cpu_cores, "") + memory_MiB = try(namespaces.value.resource_allocation.memory_MiB, "") + } + } + } + #infra profile cluster_profile { id = local.profile_map[each.value.profiles.infra.name].id @@ -92,6 +126,18 @@ resource "spectrocloud_cluster_edge" "this" { control_plane_as_worker = try(machine_pool.value.control_plane_as_worker, false) count = machine_pool.value.count + additional_labels = try(machine_pool.value.additional_labels, tomap({})) + + dynamic "taints" { + for_each = try(machine_pool.value.taints, []) + + content { + key = taints.value.key + value = taints.value.value + effect = taints.value.effect + } + } + dynamic "placements" { for_each = machine_pool.value.placements diff --git a/spectro-cluster-eks.tf b/spectro-cluster-eks.tf index cee1138..34b340c 100644 --- a/spectro-cluster-eks.tf +++ 
b/spectro-cluster-eks.tf @@ -62,6 +62,40 @@ resource "spectrocloud_cluster_eks" "this" { endpoint_access = each.value.cloud_config.endpoint_access } + dynamic "cluster_rbac_binding" { + for_each = try(each.value.cluster_rbac_binding, []) + content { + type = cluster_rbac_binding.value.type + + role = { + kind = cluster_rbac_binding.value.role.kind + name = cluster_rbac_binding.value.role.name + } + + dynamic "subjects" { + for_each = try(cluster_rbac_binding.value.subjects, []) + + content { + type = subjects.value.type + name = subjects.value.name + namespace = try(subjects.value.namespace, "") + } + } + } + } + + dynamic "namespaces" { + for_each = try(each.value.namespaces, []) + + content { + name = namespaces.value.name + resource_allocation = { + cpu_cores = try(namespaces.value.resource_allocation.cpu_cores, "") + memory_MiB = try(namespaces.value.resource_allocation.memory_MiB, "") + } + } + } + dynamic "machine_pool" { for_each = each.value.node_groups content { @@ -71,6 +105,18 @@ resource "spectrocloud_cluster_eks" "this" { az_subnets = machine_pool.value.worker_subnets disk_size_gb = machine_pool.value.disk_size_gb azs = [] + + additional_labels = try(machine_pool.value.additional_labels, tomap({})) + + dynamic "taints" { + for_each = try(machine_pool.value.taints, []) + + content { + key = taints.value.key + value = taints.value.value + effect = taints.value.effect + } + } } } diff --git a/spectro-cluster-libvirt.tf b/spectro-cluster-libvirt.tf index 7a97626..39a04ce 100644 --- a/spectro-cluster-libvirt.tf +++ b/spectro-cluster-libvirt.tf @@ -9,6 +9,40 @@ resource "spectrocloud_cluster_libvirt" "this" { ntp_servers = try(each.value.cloud_config.ntp_servers, []) } + dynamic "cluster_rbac_binding" { + for_each = try(each.value.cluster_rbac_binding, []) + content { + type = cluster_rbac_binding.value.type + + role = { + kind = cluster_rbac_binding.value.role.kind + name = cluster_rbac_binding.value.role.name + } + + dynamic "subjects" { + for_each = try(cluster_rbac_binding.value.subjects, []) + + content { + type = subjects.value.type + name = subjects.value.name + namespace = try(subjects.value.namespace, "") + } + } + } + } + + dynamic "namespaces" { + for_each = try(each.value.namespaces, []) + + content { + name = namespaces.value.name + resource_allocation = { + cpu_cores = try(namespaces.value.resource_allocation.cpu_cores, "") + memory_MiB = try(namespaces.value.resource_allocation.memory_MiB, "") + } + } + } + #infra profile cluster_profile { id = local.profile_map[each.value.profiles.infra.name].id @@ -94,6 +128,19 @@ resource "spectrocloud_cluster_libvirt" "this" { control_plane_as_worker = try(machine_pool.value.control_plane_as_worker, false) count = machine_pool.value.count + + additional_labels = try(machine_pool.value.additional_labels, tomap({})) + + dynamic "taints" { + for_each = try(machine_pool.value.taints, []) + + content { + key = taints.value.key + value = taints.value.value + effect = taints.value.effect + } + } + dynamic "placements" { for_each = machine_pool.value.placements diff --git a/spectro-cluster-tke.tf b/spectro-cluster-tke.tf index aa3969e..fe8cecc 100644 --- a/spectro-cluster-tke.tf +++ b/spectro-cluster-tke.tf @@ -58,10 +58,44 @@ resource "spectrocloud_cluster_tke" "this" { vpc_id = each.value.cloud_config.tke_vpc_id az_subnets = each.value.cloud_config.tke_subnets azs = [] - public_access_cidrs = [] + public_access_cidrs = try(each.value.public_access_cidrs, []) endpoint_access = each.value.cloud_config.endpoint_access } + dynamic 
"cluster_rbac_binding" { + for_each = try(each.value.cluster_rbac_binding, []) + content { + type = cluster_rbac_binding.value.type + + role = { + kind = cluster_rbac_binding.value.role.kind + name = cluster_rbac_binding.value.role.name + } + + dynamic "subjects" { + for_each = try(cluster_rbac_binding.value.subjects, []) + + content { + type = subjects.value.type + name = subjects.value.name + namespace = try(subjects.value.namespace, "") + } + } + } + } + + dynamic "namespaces" { + for_each = try(each.value.namespaces, []) + + content { + name = namespaces.value.name + resource_allocation = { + cpu_cores = try(namespaces.value.resource_allocation.cpu_cores, "") + memory_MiB = try(namespaces.value.resource_allocation.memory_MiB, "") + } + } + } + dynamic "machine_pool" { for_each = each.value.node_groups content { @@ -71,6 +105,18 @@ resource "spectrocloud_cluster_tke" "this" { az_subnets = machine_pool.value.worker_subnets disk_size_gb = machine_pool.value.disk_size_gb azs = [] + + additional_labels = try(machine_pool.value.additional_labels, tomap({})) + + dynamic "taints" { + for_each = try(machine_pool.value.taints, []) + + content { + key = taints.value.key + value = taints.value.value + effect = taints.value.effect + } + } } }