diff --git a/examples/edge-vsphere/config/profile/profile-system.yaml b/examples/edge-vsphere/config/profile/profile-system.yaml
deleted file mode 100644
index c7e4042..0000000
--- a/examples/edge-vsphere/config/profile/profile-system.yaml
+++ /dev/null
@@ -1,751 +0,0 @@
-name: system-profile-withcreds
-description: system-profile
-cloudType: all
-type: system
-packs:
- - name: "pfsense-vpsherewithcreds"
- type: manifest
- registry: Public Repo
- manifests:
- - name: pfsense-vpsherewithcreds-config
- content: |
- apiVersion: v1
- data:
- user-data.tmpl: |
- #cloud-config
- write_files:
- - encoding: base64
- content: $${CONFIG_XML}
- owner: root:wheel
- path: /cf/conf/config.xml
- permissions: '0644'
- config-xml.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- $${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- $${DNS_1}
- $${DNS_2}
-
-
-
-
-
- vmx1
-
-
-
- 32
- $${IP_ADDR_WAN}
- $${SUBNET_WAN}
- WANGW
-
-
-
- vmx0
-
-
- $${IP_ADDR_LAN}
- $${SUBNET_LAN}
-
-
-
-
-
-
-
- $${DHCP_RANGE_START}
- $${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
- WANGW
-
- wan
- $${IP_GATEWAY_WAN}
- WANGW
-
- inet
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- kind: ConfigMap
- metadata:
- name: pfsense-vm-files
-
- ---
- apiVersion: v1
- data:
- credentials: Q1JFRFM6IEZST01fU0VDUkVUX1JFRg==
- kind: Secret
- metadata:
- name: account-creds
- type: Opaque
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Provider
- metadata:
- name: vsphere-custom
- spec:
- provider: custom
- credentials:
- source: Secret
- secretRef:
- name: account-creds
- key: credentials
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Configuration
- metadata:
- name: pfsense-gateway-vm-config
- namespace: jet-system
- spec:
- providerRef:
- name: vsphere-custom
- namespace: jet-system
- volumeSpec:
- volumeMounts:
- - name: spectro-directory
- mountPath: /opt/spectrocloud
- - mountPath: "/var/files"
- name: files-vol
- volumes:
- - name: files-vol
- configMap:
- name: pfsense-vm-files
- - name: spectro-directory
- hostPath:
- path: /opt/spectrocloud
- type: Directory
- deleteResource: true
- variableRef:
- - name: TF_VAR_VSPHERE_HOST
- valueFrom:
- secretKeyRef:
- key: vcenterServer
- name: vsphere-cloud-account
- - name: TF_VAR_USERNAME
- valueFrom:
- secretKeyRef:
- key: username
- name: vsphere-cloud-account
- - name: TF_VAR_PASSWORD
- valueFrom:
- secretKeyRef:
- key: password
- name: vsphere-cloud-account
- - name: TF_VAR_INSECURE
- valueFrom:
- secretKeyRef:
- key: insecure
- name: vsphere-cloud-account
- variable:
- VM_NAME: "pfsense-gateway-vm"
- NTP: FROM_SECRET_REF
- DNS_1: FROM_SECRET_REF
- DNS_2: FROM_SECRET_REF
- IP_ADDR_WAN: FROM_SECRET_REF
- IP_GATEWAY_WAN: FROM_SECRET_REF
- SUBNET_WAN: FROM_SECRET_REF
- IP_ADDR_LAN: FROM_SECRET_REF
- SUBNET_LAN: FROM_SECRET_REF
- DHCP_RANGE_START: FROM_SECRET_REF
- DHCP_RANGE_END: FROM_SECRET_REF
- DATACENTER: FROM_SECRET_REF
- DATASTORE: FROM_SECRET_REF
- RESOURCE_POOL: FROM_SECRET_REF
- WAN_NETWORK: FROM_SECRET_REF
- LAN_NETWORK: FROM_SECRET_REF
- VM_TEMPLATE_NAME: FROM_SECRET_REF
- FOLDER: FROM_SECRET_REF
- hcl: |
- provider "vsphere" {
- user = var.USERNAME
- password = var.PASSWORD
- vsphere_server = var.VSPHERE_HOST
-
- allow_unverified_ssl = tobool(var.INSECURE)
- }
-
- variable "USERNAME" {
- type = string
- }
-
- variable "PASSWORD" {
- type = string
- }
-
- variable "VSPHERE_HOST" {
- type = string
- }
-
- variable "INSECURE" {
- type = string
- default = "true"
- }
-
- variable "NTP" {
- type = string
- }
-
- variable "DNS_1" {
- type = string
- }
-
- variable "DNS_2" {
- type = string
- }
-
- variable "IP_ADDR_WAN" {
- type = string
- }
-
- variable "SUBNET_WAN" {
- type = string
- }
-
- variable "IP_GATEWAY_WAN" {
- type = string
- }
-
- variable "IP_ADDR_LAN" {
- type = string
- }
-
- variable "SUBNET_LAN" {
- type = string
- }
-
- variable "DHCP_RANGE_START" {
- type = string
- }
-
- variable "DHCP_RANGE_END" {
- type = string
- }
-
- variable "DATACENTER" {
- type = string
- }
-
- variable "DATASTORE" {
- type = string
- }
-
- variable "RESOURCE_POOL" {
- type = string
- }
-
- variable "WAN_NETWORK" {
- type = string
- }
-
- variable "LAN_NETWORK" {
- type = string
- }
-
- variable "VM_TEMPLATE_NAME" {
- type = string
- }
-
- variable "VM_NAME" {
- type = string
- }
-
- variable "FOLDER" {
- type = string
- }
-
- ##### OUTPUT #####
- output "config_value" {
- value = data.template_file.config.rendered
- }
-
- output "user_data_value" {
- value = data.template_file.user_data.rendered
- }
-
- ##### PROVIDER #####
- data "template_file" "config" {
- template = file("/var/files/config-xml.tmpl")
- vars = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- SUBNET_WAN = var.SUBNET_WAN
- IP_GATEWAY_WAN = var.IP_GATEWAY_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
- }
-
- data "template_file" "user_data" {
- template = file("/var/files/user-data.tmpl")
- vars = {
- CONFIG_XML = base64encode(data.template_file.config.rendered)
- }
- }
-
- data "vsphere_datacenter" "dc" {
- name = var.DATACENTER
- }
-
- data "vsphere_datastore" "datastore" {
- name = var.DATASTORE
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_resource_pool" "pool" {
- name = var.RESOURCE_POOL
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_network" "wan_network" {
- name = var.WAN_NETWORK
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_network" "lan_network" {
- name = var.LAN_NETWORK
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_virtual_machine" "template" {
- name = var.VM_TEMPLATE_NAME
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- resource "vsphere_virtual_machine" "vm" {
- name = var.VM_NAME
- resource_pool_id = data.vsphere_resource_pool.pool.id
- datastore_id = data.vsphere_datastore.datastore.id
- folder = var.FOLDER
-
- wait_for_guest_net_timeout = 0
-
- num_cpus = 2
- memory = 4096
- guest_id = "freebsd12_64Guest"
- scsi_type = "lsilogic"
-
- network_interface {
- network_id = data.vsphere_network.lan_network.id
- }
-
- network_interface {
- network_id = data.vsphere_network.wan_network.id
- }
-
- cdrom {
- client_device = true
- }
-
- disk {
- label = var.VM_TEMPLATE_NAME
- size = data.vsphere_virtual_machine.template.disks.0.size
- eagerly_scrub = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub
- thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned
- }
-
- clone {
- template_uuid = data.vsphere_virtual_machine.template.id
- }
-
- extra_config = {
- "guestinfo.userdata" = base64encode(data.template_file.user_data.rendered)
- "guestinfo.userdata.encoding" = "base64"
- "guestinfo.metadata" = <<-EOT
- {
-            "instance-id": "$${var.VM_NAME}"
- }
- EOT
- }
- }
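For orientation: the HCL above renders `config-xml.tmpl`, base64-encodes the result into the cloud-config from `user-data.tmpl` (the `CONFIG_XML` variable), and then base64-encodes that rendered user-data again into `guestinfo.userdata`. Assuming the rendered user-data has been captured to a local file (a hypothetical `user-data.yaml`), the embedded config.xml can be recovered with a sketch like:

```shell
# Strip the inner base64 layer to get back the rendered config.xml
grep 'content:' user-data.yaml | awk '{print $2}' | base64 -d > config.xml
```

Incidentally, the `credentials` value in the `account-creds` Secret above is just base64 for the placeholder `CREDS: FROM_SECRET_REF`.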
diff --git a/examples/gitops/admin/README.md b/examples/gitops/admin/README.md
deleted file mode 100644
index 881d0e4..0000000
--- a/examples/gitops/admin/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# gitops-admin
-
-Copy the `terraform.template.tfvars` file to `terraform.tfvars` and fill in your Spectro Cloud credentials and other details.
-Make sure the project and profile files under `config/` have the correct values, then run Terraform.
-
-When copying files, copy only the files present in this repo.
\ No newline at end of file
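In shell terms, the admin workflow described above amounts to roughly the following (a sketch; supply your own credentials in `terraform.tfvars`):

```shell
cd examples/gitops/admin
cp terraform.template.tfvars terraform.tfvars
# edit terraform.tfvars: set sc_host and sc_api_key
terraform init
terraform apply
```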
diff --git a/examples/gitops/admin/config/profile/profile-bm-infra.yaml b/examples/gitops/admin/config/profile/profile-bm-infra.yaml
deleted file mode 100644
index 5dd1406..0000000
--- a/examples/gitops/admin/config/profile/profile-bm-infra.yaml
+++ /dev/null
@@ -1,601 +0,0 @@
-name: bm-infra
-description: ""
-type: cluster
-cloudType: libvirt
-packs:
- - name: ubuntu-libvirt
- type: spectro
- layer: os
- registry: Public Repo
- version: 20.04
- tag: 20.04
- values: |-
-    # Spectro Golden images include most of the hardening standards recommended by CIS benchmarking v1.5
-
-    # Uncomment the section below to
-    # 1. include custom files to be copied over to the nodes, and/or
-    # 2. execute a list of commands before or after kubeadm init/join is executed
- #
- kubeadmconfig:
- preKubeadmCommands:
- - echo "Executing pre kube admin config commands"
- - update-ca-certificates
- postKubeadmCommands:
- - echo "Executing post kube admin config commands"
- - mkdir -p /etc/containerd/conf.d/
- files:
- - targetPath: /usr/local/share/ca-certificates/ca.crt
- targetOwner: "root:root"
- targetPermissions: "0644"
- content: |
- -----BEGIN CERTIFICATE-----
- MIIDozCCAougAwIBAgIQeO8XlqAMLhxvtCap35yktzANBgkqhkiG9w0BAQsFADBS
- PLACE YOUR CERT HERE
- mwJmHm9AIFoqBi8XpuhGmQ0nvymurEk=
- -----END CERTIFICATE-----
- - name: kubernetes
- type: spectro
- layer: k8s
- registry: Public Repo
- version: 1.21.8
- tag: 1.21.8
- values: |-
- pack:
- k8sHardening: True
-      # CIDR range for Pods in the cluster
-      # Note: this must not overlap with any of the host or service networks
- podCIDR: "172.10.0.0/16"
-      # CIDR-notation IP range from which to assign service cluster IPs
-      # Note: this must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: "11.0.0.0/22"
-
-    # KubeAdm customization for Kubernetes hardening. The config below will be ignored if the k8sHardening property above is disabled
- kubeadmconfig:
- apiServer:
- certSANs:
- - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}"
- extraArgs:
-          # Note: the secure-port flag is used during kubeadm init. Do not change this flag on a running cluster
- secure-port: "6443"
- anonymous-auth: "true"
- insecure-port: "0"
- profiling: "false"
- disable-admission-plugins: "AlwaysAdmit"
- default-not-ready-toleration-seconds: "60"
- default-unreachable-toleration-seconds: "60"
- enable-admission-plugins: "NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy"
- audit-log-path: /var/log/apiserver/audit.log
- audit-policy-file: /etc/kubernetes/audit-policy.yaml
- audit-log-maxage: "30"
- audit-log-maxbackup: "10"
- audit-log-maxsize: "100"
- authorization-mode: RBAC,Node
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- extraVolumes:
- - name: audit-log
- hostPath: /var/log/apiserver
- mountPath: /var/log/apiserver
- pathType: DirectoryOrCreate
- - name: audit-policy
- hostPath: /etc/kubernetes/audit-policy.yaml
- mountPath: /etc/kubernetes/audit-policy.yaml
- readOnly: true
- pathType: File
- controllerManager:
- extraArgs:
- profiling: "false"
- terminated-pod-gc-threshold: "25"
- pod-eviction-timeout: "1m0s"
- use-service-account-credentials: "true"
- feature-gates: "RotateKubeletServerCertificate=true"
- scheduler:
- extraArgs:
- profiling: "false"
- kubeletExtraArgs:
- read-only-port : "0"
- event-qps: "0"
- feature-gates: "RotateKubeletServerCertificate=true"
- protect-kernel-defaults: "true"
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- files:
- - path: hardening/audit-policy.yaml
- targetPath: /etc/kubernetes/audit-policy.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/privileged-psp.yaml
- targetPath: /etc/kubernetes/hardening/privileged-psp.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/90-kubelet.conf
- targetPath: /etc/sysctl.d/90-kubelet.conf
- targetOwner: "root:root"
- targetPermissions: "0600"
- preKubeadmCommands:
-      # For enabling the 'protect-kernel-defaults' kubelet flag, kernel parameter changes are required
- - 'echo "====> Applying kernel parameters for Kubelet"'
- - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
- postKubeadmCommands:
-      # Apply the privileged PodSecurityPolicy on the first master node; otherwise, CNI (and other) pods won't come up
-      # Sometimes the API server takes a little longer to respond. Retry if applying the PodSecurityPolicy manifest fails
- - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"'
-
- # Client configuration to add OIDC based authentication flags in kubeconfig
- #clientConfig:
- #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
- #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
- #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- #oidc-extra-scope: profile,email
- - name: cni-calico
- type: spectro
- layer: cni
- registry: Public Repo
- version: 3.19.0
- tag: 3.19.0
- values: |-
- pack:
- content:
- images:
- - gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0
- - gcr.io/spectro-images-public/calico/node:v3.19.0
- - gcr.io/spectro-images-public/calico/cni:v3.19.0
- - gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0
-
- manifests:
- calico:
-
- # IPAM type to use. Supported types are calico-ipam, host-local
- ipamType: "calico-ipam"
-
- # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN
- encapsulationType: "CALICO_IPV4POOL_IPIP"
-
- # Should be one of Always, CrossSubnet, Never
- encapsulationMode: "Always"
- - name: csi-rook-ceph
- type: spectro
- layer: csi
- registry: Public Repo
- version: 1.8.0
- tag: 1.8.0
- values: |-
- pack:
- content:
- images:
- - k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
- - k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
- - k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
- - k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
- - k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
- - quay.io/cephcsi/cephcsi:v3.4.0
- - quay.io/ceph/ceph:v16.2.7
- - docker.io/rook/ceph:v1.8.0
-
- manifests:
- storageclass:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephFilesystem
- metadata:
- name: myfs
- namespace: rook-ceph # namespace:cluster
- spec:
- # The metadata pool spec. Must use replication.
- metadataPool:
- replicated:
- size: 3
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # The list of data pool specs. Can use replication or erasure coding.
- dataPools:
- - name: replicated
- failureDomain: host
- replicated:
- size: 3
- # Disallow setting pool with replica 1, this could lead to data loss without recovery.
- # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # Whether to preserve filesystem after CephFilesystem CRD deletion
- preserveFilesystemOnDelete: true
- # The metadata service (mds) configuration
- metadataServer:
- # The number of active MDS instances
- activeCount: 1
- # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
- # If false, standbys will be available, but will not have a warm cache.
- activeStandby: true
- # The affinity rules to apply to the mds deployment
- placement:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - mds-node
- # topologySpreadConstraints:
- # tolerations:
- # - key: mds-node
- # operator: Exists
- # podAffinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: kubernetes.io/hostname will place MDS across different hosts
- topologyKey: kubernetes.io/hostname
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: */zone can be used to spread MDS across different AZ
-                  # Use failure-domain.beta.kubernetes.io/zone if your cluster is v1.16 or lower
-                  # Use topology.kubernetes.io/zone if your cluster is v1.17 or newer
- topologyKey: topology.kubernetes.io/zone
- # A key/value list of annotations
- annotations:
- # key: value
- # A key/value list of labels
- labels:
- # key: value
- resources:
-                  # The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # priorityClassName: my-priority-class
- # Filesystem mirroring settings
- # mirroring:
- # enabled: true
- # list of Kubernetes Secrets containing the peer token
- # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
- # peers:
- #secretNames:
- #- secondary-cluster-peer
- # specify the schedule(s) on which snapshots should be taken
- # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
- # snapshotSchedules:
- # - path: /
- # interval: 24h # daily snapshots
- # startTime: 11:55
- # manage retention policies
- # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
- # snapshotRetention:
- # - path: /
- # duration: "h 24"
- ---
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: standard
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
- # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
- provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
- parameters:
- # clusterID is the namespace where the rook cluster is running
- # If you change this namespace, also change the namespace below where the secret namespaces are defined
- clusterID: rook-ceph # namespace:cluster
-
- # CephFS filesystem name into which the volume shall be created
- fsName: myfs
-
- # Ceph pool into which the volume shall be created
- # Required for provisionVolume: "true"
- pool: myfs-data0
-
- # The secrets contain Ceph admin credentials. These are generated automatically by the operator
- # in the same namespace as the cluster.
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
- csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
-
- # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
- # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
- # or by setting the default mounter explicitly via --volumemounter command-line argument.
- # mounter: kernel
- reclaimPolicy: Delete
- allowVolumeExpansion: true
- #Supported binding modes are Immediate, WaitForFirstConsumer
- volumeBindingMode: "WaitForFirstConsumer"
- mountOptions:
- # uncomment the following line for debugging
- #- debug
-
- cluster:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephCluster
- metadata:
- name: rook-ceph
- namespace: rook-ceph # namespace:cluster
- spec:
- cephVersion:
- # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v15 is octopus, and v16 is pacific.
-            # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
-            # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
-            # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v16.2.7-20211208
- # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
- image: quay.io/ceph/ceph:v16.2.7
- # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
-            # Future versions such as `quincy` would require this to be set to `true`.
- # Do not set to true in production.
- allowUnsupported: false
- # The path on the host where configuration files will be persisted. Must be specified.
- # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
- # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
- dataDirHostPath: /var/lib/rook
- # Whether or not upgrade should continue even if a check fails
- # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
- # Use at your OWN risk
- # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
- skipUpgradeChecks: false
-          # Whether or not to continue if PGs are not clean during an upgrade
- continueUpgradeAfterChecksEvenIfNotHealthy: false
- # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
-          # If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
-          # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
-          # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
- # The default wait timeout is 10 minutes.
- waitTimeoutForHealthyOSDInMinutes: 10
- mon:
- # Set the number of mons to be started. Generally recommended to be 3.
- # For highest availability, an odd number of mons should be specified.
- count: 3
- # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
- # Mons should only be allowed on the same node for test environments where data loss is acceptable.
- allowMultiplePerNode: false
- mgr:
- # When higher availability of the mgr is needed, increase the count to 2.
- # In that case, one mgr will be active and one in standby. When Ceph updates which
- # mgr is active, Rook will update the mgr services to match the active mgr.
- count: 1
- modules:
- # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
- # are already enabled by other settings in the cluster CR.
- - name: pg_autoscaler
- enabled: true
- # enable the ceph dashboard for viewing cluster status
- dashboard:
- enabled: true
- # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
- # urlPrefix: /ceph-dashboard
- # serve the dashboard at the given port.
- # port: 8443
- # serve the dashboard using SSL
- ssl: true
- # enable prometheus alerting for cluster
- monitoring:
- # requires Prometheus to be pre-installed
- enabled: false
- # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
- # Recommended:
- # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
- # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
- # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
- rulesNamespace: rook-ceph
- network:
- # enable host networking
- #provider: host
- # enable the Multus network provider
- #provider: multus
- #selectors:
- # The selector keys are required to be `public` and `cluster`.
- # Based on the configuration, the operator will do the following:
- # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
- # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
- #
- # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
- #
- #public: public-conf --> NetworkAttachmentDefinition object name in Multus
- #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
- # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
- #ipFamily: "IPv6"
-            # Ceph daemons to listen on both IPv4 and IPv6 networks
- #dualStack: false
- # enable the crash collector for ceph daemon crash collection
- crashCollector:
- disable: false
- # Uncomment daysToRetain to prune ceph crash entries older than the
- # specified number of days.
- #daysToRetain: 30
- # enable log collector, daemons will log on files and rotate
- # logCollector:
- # enabled: true
- # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
- # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
- cleanupPolicy:
- # Since cluster cleanup is destructive to data, confirmation is required.
- # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
- # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
- # Rook will immediately stop configuring the cluster and only wait for the delete command.
- # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
- confirmation: ""
- # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
- sanitizeDisks:
- # method indicates if the entire disk should be sanitized or simply ceph's metadata
-            # in both cases, re-install is possible
- # possible choices are 'complete' or 'quick' (default)
- method: quick
-            # dataSource indicates where to get random bytes from to write on the disk
-            # possible choices are 'zero' (default) or 'random'
-            # using random sources will consume entropy from the system and will take much more time than the zero source
- dataSource: zero
-            # iteration overwrites N times instead of the default (1)
- # takes an integer value
- iteration: 1
- # allowUninstallWithVolumes defines how the uninstall should be performed
- # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
- allowUninstallWithVolumes: false
- # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
- # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
- # tolerate taints with a key of 'storage-node'.
- # placement:
- # all:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - storage-node
- # podAffinity:
- # podAntiAffinity:
- # topologySpreadConstraints:
- # tolerations:
- # - key: storage-node
- # operator: Exists
- # The above placement information can also be specified for mon, osd, and mgr components
- # mon:
- # Monitor deployments may contain an anti-affinity rule for avoiding monitor
- # collocation on the same node. This is a required rule when host network is used
- # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
- # preferred rule with weight: 50.
- # osd:
- # mgr:
- # cleanup:
- annotations:
- # all:
- # mon:
- # osd:
- # cleanup:
- # prepareosd:
- # If no mgr annotations are set, prometheus scrape annotations will be set by default.
- # mgr:
- labels:
- # all:
- # mon:
- # osd:
- # cleanup:
- # mgr:
- # prepareosd:
- # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
- # These labels can be passed as LabelSelector to Prometheus
- # monitoring:
- # crashcollector:
- resources:
-          # The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
- # mgr:
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # The above example requests/limits can also be added to the other components
- # mon:
- # osd:
- # For OSD it also is a possible to specify requests/limits based on device class
- # osd-hdd:
- # osd-ssd:
- # osd-nvme:
- # prepareosd:
- # mgr-sidecar:
- # crashcollector:
- # logcollector:
- # cleanup:
- # The option to automatically remove OSDs that are out and are safe to destroy.
- removeOSDsIfOutAndSafeToRemove: true
- # priorityClassNames:
- # all: rook-ceph-default-priority-class
- # mon: rook-ceph-mon-priority-class
- # osd: rook-ceph-osd-priority-class
- # mgr: rook-ceph-mgr-priority-class
- storage: # cluster level storage configuration and selection
- useAllNodes: true
- useAllDevices: true
- #deviceFilter:
- config:
- # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
- # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
- # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
- # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
- # osdsPerDevice: "1" # this value can be overridden at the node or device level
- # encryptedDevice: "true" # the default value for this option is "false"
- # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
- # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
- # nodes:
- # - name: "172.17.4.201"
- # devices: # specific devices to use for storage can be specified for each node
- # - name: "sdb"
- # - name: "nvme01" # multiple osds can be created on high performance devices
- # config:
- # osdsPerDevice: "5"
- # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
- # config: # configuration can be specified at the node level which overrides the cluster level config
- # - name: "172.17.4.301"
- # deviceFilter: "^sd."
- # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
- onlyApplyOSDPlacement: false
- # The section for configuring management of daemon disruptions during upgrade or fencing.
- disruptionManagement:
- # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
- # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
- # block eviction of OSDs by default and unblock them safely when drains are detected.
- managePodBudgets: true
- # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
- # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
- osdMaintenanceTimeout: 30
- # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
-          # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
- # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
- pgHealthCheckTimeout: 0
- # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
- # Only available on OpenShift.
- manageMachineDisruptionBudgets: false
- # Namespace in which to watch for the MachineDisruptionBudgets.
- machineDisruptionBudgetNamespace: openshift-machine-api
-
- # healthChecks
- # Valid values for daemons are 'mon', 'osd', 'status'
- healthCheck:
- daemonHealth:
- mon:
- disabled: false
- interval: 45s
- osd:
- disabled: false
- interval: 60s
- status:
- disabled: false
- interval: 60s
-          # Change the pod liveness probe; it works for all mon, mgr, and osd daemons
- livenessProbe:
- mon:
- disabled: false
- mgr:
- disabled: false
- osd:
- disabled: false
\ No newline at end of file
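For readability, the PSP retry one-liner in the `postKubeadmCommands` above unrolls to roughly this shell (a sketch of the same logic):

```shell
export KUBECONFIG=/etc/kubernetes/admin.conf
if [ -f "$KUBECONFIG" ]; then
  echo " ====> Applying PodSecurityPolicy"
  until kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null; do
    echo "Failed to apply PodSecurityPolicies, will retry in 5s"
    sleep 5
  done
else
  echo "Skipping PodSecurityPolicy for worker nodes"
fi
```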
diff --git a/examples/gitops/admin/config/profile/profile-spectro-core.yaml b/examples/gitops/admin/config/profile/profile-spectro-core.yaml
deleted file mode 100644
index cff3420..0000000
--- a/examples/gitops/admin/config/profile/profile-spectro-core.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: spectro-core
-description: spectro-core
-type: add-on
-cloudType: all
-packs:
- - name: "spectro-proxy"
- registry: Public Repo
- type: "spectro"
- layer: "addon"
- version: "1.0.0"
- values: |
- pack:
- spectrocloud.com/install-priority: "-200"
- content:
- images:
- - gcr.io/spectro-images-public/release/frpc:v1.0.0
- - gcr.io/spectro-images-public/release/frpc-init:v1.0.0
- - docker.io/velero/velero:v1.7.1
- - docker.io/velero/velero-plugin-for-aws:v1.3.0
-
- manifests:
- spectro-proxy:
- namespace: "cluster-{{ .spectro.system.cluster.uid }}"
- server: "{{ .spectro.system.reverseproxy.server }}"
- clusterUid: "{{ .spectro.system.cluster.uid }}"
- subdomain: "cluster-{{ .spectro.system.cluster.uid }}"
-
- - name: "lb-metallb"
- registry: Public Repo
- type: "spectro"
- layer: "addon"
- version: "0.11.0"
- values: |
- pack:
- spectrocloud.com/install-priority: "0"
- content:
- images:
- - quay.io/metallb/controller:v0.11.0
- - quay.io/metallb/speaker:v0.11.0
- manifests:
- metallb:
-        # The namespace to use for deploying MetalLB
-        namespace: "metallb-system"
-        # MetalLB will skip assigning the .0 and .255 IP addresses when this flag is enabled
-        avoidBuggyIps: true
-        # Layer 2 config; the IP address range MetalLB should use when assigning IPs to services of type LoadBalancer
-        # For the supported formats, check https://metallb.universe.tf/configuration/#layer-2-configuration
- addresses:
- - 192.168.100.245-192.168.100.254
\ No newline at end of file
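For reference, MetalLB v0.11 is configured through a ConfigMap rather than CRDs, so the `lb-metallb` pack presumably renders the values above into something like the upstream layer-2 configuration (a sketch, not the pack's actual output):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.100.245-192.168.100.254
```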
diff --git a/examples/gitops/admin/config/project/project-hospital-201.yaml b/examples/gitops/admin/config/project/project-hospital-201.yaml
deleted file mode 100644
index a851cf7..0000000
--- a/examples/gitops/admin/config/project/project-hospital-201.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-name: hospital-201
-description: "New project for hospital 201"
-import_url: https://gitlab.com/rishi-anand/medium-appliance-template.git
diff --git a/examples/gitops/admin/gitlab_project.tf b/examples/gitops/admin/gitlab_project.tf
deleted file mode 100644
index 7582adc..0000000
--- a/examples/gitops/admin/gitlab_project.tf
+++ /dev/null
@@ -1,62 +0,0 @@
-#locals {
-# gitlab_project_ids = {
-# for k, v in gitlab_project.this :
-# v.name => v.id
-# }
-#}
-#
-#resource "gitlab_project" "this" {
-# for_each = local.projects
-#
-# name = each.value.name
-# description = each.value.description
-# visibility_level = "public" # or 'private'
-# pipelines_enabled = true
-#  shared_runners_enabled = true # shared runners means runners from other projects can be used
-# import_url = each.value.import_url
-#}
-#
-#resource "gitlab_project_variable" "host" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_HOST_DEV"
-# value = var.sc_host
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "username" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_USERNAME_DEV"
-# value = var.sc_username
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "password" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_PASSWORD_DEV"
-# value = var.sc_password
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "project" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_PROJECT_DEV"
-# value = each.value.name
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "statekey" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "PROJECT_TF_STATE"
-# value = each.value.name
-# protected = false
-#}
\ No newline at end of file
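If uncommented, the resources above seed each GitLab project with CI/CD variables (`SC_HOST_DEV`, `SC_USERNAME_DEV`, `SC_PASSWORD_DEV`, `SC_PROJECT_DEV`, `PROJECT_TF_STATE`). A pipeline in the imported project could then consume them along these lines (a hypothetical `.gitlab-ci.yml` fragment; only the variable names come from this repo, the rest is illustrative):

```yaml
apply:
  image: hashicorp/terraform:light
  variables:
    TF_VAR_sc_host: $SC_HOST_DEV
    TF_VAR_sc_username: $SC_USERNAME_DEV
    TF_VAR_sc_password: $SC_PASSWORD_DEV
    TF_VAR_sc_project: $SC_PROJECT_DEV
  script:
    - terraform init
    - terraform apply -auto-approve
```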
diff --git a/examples/gitops/admin/main.tf b/examples/gitops/admin/main.tf
deleted file mode 100644
index 1af2444..0000000
--- a/examples/gitops/admin/main.tf
+++ /dev/null
@@ -1,55 +0,0 @@
-terraform {
- required_version = ">= 0.14.0"
-
- required_providers {
- spectrocloud = {
- version = "= 0.6.5-pre"
- source = "spectrocloud/spectrocloud"
- }
-
-# gitlab = {
-# source = "gitlabhq/gitlab"
-# version = "3.6.0"
-# }
- }
-}
-
-variable "sc_host" {}
-variable "sc_api_key" {
- sensitive = true
-}
-
-provider "spectrocloud" {
- host = var.sc_host
- api_key = var.sc_api_key
- project_name = ""
-}
-
-#variable "gitlab_token" {}
-#
-#provider "gitlab" {
-# token = var.gitlab_token
-#}
-
-locals {
- projects = {
- for k in fileset("config/project", "project-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/project/${k}"))
- }
-
- profiles = {
- for k in fileset("config/profile", "profile-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}"))
- }
-}
-
-module "Spectro" {
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
-  # It is recommended to use a pinned registry version of the module (below) instead of pulling it from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- projects = local.projects
- profiles = local.profiles
-}
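The `locals` block above auto-discovers every `project-*.yaml` and `profile-*.yaml` under `config/`. With `terraform.tfvars` in place, you can inspect the result; for example, given the `config/project/project-hospital-201.yaml` file shown earlier in this diff, a `terraform console` session would look roughly like this (a sketch):

```shell
$ terraform console
> keys(local.projects)
["project-hospital-201"]
> local.projects["project-hospital-201"].name
"hospital-201"
```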
diff --git a/examples/gitops/admin/terraform.template.tfvars b/examples/gitops/admin/terraform.template.tfvars
deleted file mode 100644
index f67ec00..0000000
--- a/examples/gitops/admin/terraform.template.tfvars
+++ /dev/null
@@ -1,5 +0,0 @@
-# Credentials
-sc_host = "{enter Spectro Cloud host, blank for SaaS}"
-sc_api_key = "{enter Spectro Cloud API Key}"
-
-#gitlab_token = "{enter Gitlab access token}"
\ No newline at end of file
diff --git a/examples/gitops/project/README.md b/examples/gitops/project/README.md
deleted file mode 100644
index a28ca2e..0000000
--- a/examples/gitops/project/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# gitops-project
-
-Copy the `terraform.template.tfvars` file to `terraform.tfvars` and fill in your Spectro Cloud credentials and other details.
-Make sure the project, appliance, and cluster files under `config/` have the correct values, then run Terraform.
-
-When copying files, copy only the files present in this repo.
\ No newline at end of file
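Based on the file paths in this diff, the project workspace's `config/` tree looks like this (reconstructed for orientation; the same copy-the-tfvars-then-apply flow as the admin example applies here):

```
examples/gitops/project/
└── config/
    ├── appliance/appliance-hospital-201.yaml
    ├── cluster/cluster-hospital-201.yaml
    └── profile/profile-system.yaml
```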
diff --git a/examples/gitops/project/config/appliance/appliance-hospital-201.yaml b/examples/gitops/project/config/appliance/appliance-hospital-201.yaml
deleted file mode 100644
index 1a86e7c..0000000
--- a/examples/gitops/project/config/appliance/appliance-hospital-201.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-uid: "hospital-201"
-name: "hospital-san-jose"
diff --git a/examples/gitops/project/config/cluster/cluster-hospital-201.yaml b/examples/gitops/project/config/cluster/cluster-hospital-201.yaml
deleted file mode 100644
index 15f0e44..0000000
--- a/examples/gitops/project/config/cluster/cluster-hospital-201.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: hospital-201
-cloudType: libvirt
-tags:
-- "hospital-201"
-- "skip_completion"
-profiles:
- infra:
- name: bm-infra
- system:
- name: bm-system-profile
- addons:
- - name: spectro-core
-cloud_config:
- ssh_key: spectro2022
- vip: 192.168.100.15
- ntp_servers: ["ntp.ge.com"]
-node_groups:
- - name: master-pool
- control_plane: true
- control_plane_as_worker: true
- count: 1
- disk_size_gb: 60
- memory_mb: 8192
- cpu: 2
- cpus_sets: 1
- attached_disks_size_gb: "10"
- placements:
- - appliance: "hospital-201"
- network_type: "bridge"
- network_names: "br0"
- network: "br"
- image_storage_pool: "ehl_images"
- target_storage_pool: "ehl_images"
- data_storage_pool: "ehl_data"
-
- - name: worker-pool
- count: 3
- disk_size_gb: 60
- memory_mb: 8192
- cpu: 4
- cpus_sets: 1
- attached_disks_size_gb: "10"
- placements:
- - appliance: "hospital-201"
- network_type: "bridge"
- network_names: "br0"
- network: "br"
- image_storage_pool: "ehl_images"
- target_storage_pool: "ehl_images"
- data_storage_pool: "ehl_data"
diff --git a/examples/gitops/project/config/profile/profile-system.yaml b/examples/gitops/project/config/profile/profile-system.yaml
deleted file mode 100644
index 31f35ae..0000000
--- a/examples/gitops/project/config/profile/profile-system.yaml
+++ /dev/null
@@ -1,673 +0,0 @@
-name: bm-system-profile
-description: bm-system-profile
-type: system
-cloudType: all
-packs:
- - name: "pfsense-gateway"
- type: manifest
- registry: Public Repo
- manifests:
- - name: pfsense-gateway-config
- content: |
- ---
- apiVersion: v1
- data:
- user-data.tmpl: |
- #cloud-config
- write_files:
- - encoding: base64
- content: $${CONFIG_XML}
- owner: root:root
- path: /cf/conf/config.xml
- permissions: '0644'
- config-xml.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- $${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- $${DNS_1}
- $${DNS_2}
-
-
-
-
-
- vtnet1
-
- $${IP_ADDR_WAN}
-
-
- 32
-
-
-
-
-
-
-
- SavedCfg
-
-
-
-
-
-
-
- dhcp6
-
- 0
- wan
-
-
-
-
- vtnet0
-
-
- $${IP_ADDR_LAN}
- $${SUBNET_LAN}
-
-
-
-
-
-
-
- $${DHCP_RANGE_START}
- $${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- kind: ConfigMap
- metadata:
- name: pfsense-vm-files
- ---
- apiVersion: v1
- data:
- credentials: VVJJX1RFTVBMQVRFOiAicWVtdStzc2g6Ly8lc0Alcy9zeXN0ZW0/a2V5ZmlsZT0lcyZzb2NrZXQ9JXMma25vd25faG9zdHNfdmVyaWZ5PWlnbm9yZSI=
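-   # The base64 value above decodes to: URI_TEMPLATE: "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"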
- kind: Secret
- metadata:
- name: libvirt-account-creds
- type: Opaque
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Provider
- metadata:
- name: libvirt
- spec:
- provider: custom
- credentials:
- source: Secret
- secretRef:
- name: libvirt-account-creds
- key: credentials
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Configuration
- metadata:
- name: pfsense-gateway-vm-config
- spec:
- volumeSpec:
- volumeMounts:
- - name: libvirt
- mountPath: /var/run/libvirt/libvirt-sock
- - name: spectro-directory
- mountPath: /opt/spectrocloud
- - mountPath: "/var/files"
- name: files-vol
- volumes:
- - name: files-vol
- configMap:
- name: pfsense-vm-files
- - name: libvirt
- hostPath:
- path: /var/run/libvirt/libvirt-sock
- type: Socket
- - name: spectro-directory
- hostPath:
- path: /opt/spectrocloud
- type: Directory
- deleteResource: true
- variable:
- VM_NAME: "pfsense-gateway-vm"
- NTP: FROM_SECRET_REF
- DNS_1: FROM_SECRET_REF
- DNS_2: FROM_SECRET_REF
- IP_ADDR_WAN: FROM_SECRET_REF
- IP_ADDR_LAN: FROM_SECRET_REF
- SUBNET_LAN: FROM_SECRET_REF
- DHCP_RANGE_START: FROM_SECRET_REF
- DHCP_RANGE_END: FROM_SECRET_REF
- providerRef:
- name: libvirt
- hcl: |
- terraform {
- required_version = ">= 0.13"
- required_providers {
- libvirt = {
- source = "dmacvicar/libvirt"
- version = "0.6.14"
- }
- }
- }
-
- ##### VARIABLES #####
- variable "URI_TEMPLATE" {
- type = string
- default = "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"
- }
-
- variable "NTP" {
- type = string
- }
-
- variable "DNS_1" {
- type = string
- }
-
- variable "DNS_2" {
- type = string
- }
-
- variable "IP_ADDR_WAN" {
- type = string
- }
-
- variable "IP_ADDR_LAN" {
- type = string
- }
-
- variable "SUBNET_LAN" {
- type = string
- }
-
- variable "DHCP_RANGE_START" {
- type = string
- }
-
- variable "DHCP_RANGE_END" {
- type = string
- }
-
- variable "LIBVIRT_SOCKET" {
- type = string
- default = "/var/run/libvirt/libvirt-sock"
- }
-
- variable "VM_NAME" {
- type = string
- default = "pfsense-terraform"
- }
-
- ##### OUTPUT #####
- output "config_value" {
- value = data.template_file.config.rendered
- }
-
- output "user_data_value" {
- value = data.template_file.user_data.rendered
- }
-
- ##### PROVIDER #####
- provider "libvirt" {
- uri = "qemu:///system"
- #uri = format(var.URI_TEMPLATE, var.SSH_USER, var.HOST_IP, var.SSH_KEY, var.LIBVIRT_SOCKET)
- }
-
- data "template_file" "config" {
- template = file("/var/files/config-xml.tmpl")
- vars = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
- }
-
- data "template_file" "user_data" {
- template = file("/var/files/user-data.tmpl")
- vars = {
- CONFIG_XML = base64encode(data.template_file.config.rendered)
- }
- }
-
- resource "libvirt_pool" "ubuntu" {
- name = "ubuntuop"
- type = "dir"
- path = "/var/lib/libvirt/terraform-provider-libvirt-pool-ubuntuoperator"
- }
-
- resource "libvirt_volume" "ubuntu-qcow2" {
- name = "ubuntu-qcow2"
- pool = libvirt_pool.ubuntu.name
- source = "/opt/spectrocloud/vm-operator/pfsense-cloudinit-base-gehc-2.5.1-SC.qcow2"
- format = "qcow2"
- }
-
- resource "libvirt_cloudinit_disk" "commoninit" {
- name = "commoninit.iso"
- user_data = data.template_file.user_data.rendered
- pool = libvirt_pool.ubuntu.name
- }
-
- resource "libvirt_domain" "domain-ubuntu" {
- name = var.VM_NAME
- memory = "2048"
- vcpu = 1
-
- cloudinit = libvirt_cloudinit_disk.commoninit.id
-
- network_interface {
- bridge = "br0"
- }
-
- network_interface {
- bridge = "br1"
- }
-
- console {
- type = "pty"
- target_port = "0"
- target_type = "serial"
- }
-
- disk {
- volume_id = libvirt_volume.ubuntu-qcow2.id
- }
-
- graphics {
- type = "vnc"
- listen_type = "address"
- listen_address = "0.0.0.0"
- autoport = true
- }
- }
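In the pack above, the inner `hcl:` block pins the libvirt provider to the local socket (`qemu:///system`) and leaves the remote-over-SSH form commented out. A minimal standalone sketch of that `format()`/`URI_TEMPLATE` pattern, using hypothetical connection values (the user, host, and key path are placeholders, not values from this repo):

    terraform {
      required_providers {
        libvirt = {
          source  = "dmacvicar/libvirt"
          version = "0.6.14"
        }
      }
    }

    variable "URI_TEMPLATE" {
      type    = string
      default = "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"
    }

    # format() fills the %s placeholders in order: SSH user, host, key file, remote socket path.
    provider "libvirt" {
      uri = format(var.URI_TEMPLATE, "root", "192.0.2.10", "/root/.ssh/libvirt.key", "/var/run/libvirt/libvirt-sock")
    }

The same template string is what the `libvirt-account-creds` Secret carries, so the in-cluster Configuration and a manual Terraform run can share one connection format.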
diff --git a/examples/gitops/project/main.tf b/examples/gitops/project/main.tf
deleted file mode 100644
index e7c60db..0000000
--- a/examples/gitops/project/main.tf
+++ /dev/null
@@ -1,61 +0,0 @@
-terraform {
- required_version = ">= 0.14.0"
-
- required_providers {
- spectrocloud = {
- version = "= 0.6.5-pre"
- source = "spectrocloud/spectrocloud"
- }
- }
-}
-
-variable "sc_host" {}
-variable "sc_api_key" {
- sensitive = true
-}
-variable "sc_project_name" {}
-
-provider "spectrocloud" {
- host = var.sc_host
- api_key = var.sc_api_key
- project_name = var.sc_project_name
-}
-
-locals {
- profiles = {
- for k in fileset("config/profile", "profile-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}"))
- }
-
- appliances = {
- for k in fileset("config/appliance", "appliance-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}"))
- }
-
- clusters = {
- for k in fileset("config/cluster", "cluster-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}"))
- }
-}
-
-module "Spectro" {
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to pin a released version of the module instead of tracking latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- profiles = local.profiles
- appliances = local.appliances
-}
-
-module "SpectroClusters" {
- depends_on = [module.Spectro]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to pin a released version of the module instead of tracking latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- clusters = local.clusters
-}
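The three `locals` blocks above carry the whole GitOps contract: any `profile-*.yaml`, `appliance-*.yaml`, or `cluster-*.yaml` dropped into the matching `config/` directory is decoded and handed to the modules, with no HCL changes. A self-contained sketch of the `fileset()` + `yamldecode()` idiom, assuming a hypothetical `config/profile/profile-demo.yaml` next to the module:

    locals {
      # Builds "profile-demo" => decoded YAML object, for every matching file.
      profiles = {
        for f in fileset("${path.module}/config/profile", "profile-*.yaml") :
        trimsuffix(f, ".yaml") => yamldecode(file("${path.module}/config/profile/${f}"))
      }
    }

    output "profile_names" {
      value = keys(local.profiles)
    }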
diff --git a/examples/gitops/project/terraform.template.tfvars b/examples/gitops/project/terraform.template.tfvars
deleted file mode 100644
index 3c4423e..0000000
--- a/examples/gitops/project/terraform.template.tfvars
+++ /dev/null
@@ -1,4 +0,0 @@
-# Credentials
-sc_host = "{enter Spectro Cloud host, blank for SaaS}"
-sc_api_key = "{enter Spectro Cloud API Key}"
-sc_project_name = "{enter Spectro Cloud Project Name}"
\ No newline at end of file
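A filled-in copy of the template above would look like the following sketch; the values shown are placeholders (`Default` is a hypothetical project name), not real credentials:

    # terraform.tfvars
    sc_host         = ""            # leave blank to target the SaaS endpoint
    sc_api_key      = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    sc_project_name = "Default"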
diff --git a/examples/libvirt/config/profile/profile-system.yaml b/examples/libvirt/config/profile/profile-system.yaml
deleted file mode 100644
index ab1bd43..0000000
--- a/examples/libvirt/config/profile/profile-system.yaml
+++ /dev/null
@@ -1,725 +0,0 @@
-name: system-profile-simyam-libvirt
-description: system-profile
-type: system
-cloudType: all
-packs:
- - name: "pfsense-gateway"
- type: manifest
- registry: Public Repo
- manifests:
- - name: pfsense-gateway-config
- content: |
- # unattended file
- # ---
- # edge:
- # additionalK8SResourceSpec:
- # storageResourceSpecs:
- # -
- # data:
- # TF_VAR_DHCP_RANGE_END: "192.168.100.250"
- # TF_VAR_DHCP_RANGE_START: "192.168.100.50"
- # TF_VAR_DNS_1: "10.220.220.220"
- # TF_VAR_DNS_2: "10.220.220.221"
- # TF_VAR_IP_ADDR_LAN: "192.168.100.2"
- # TF_VAR_IP_ADDR_WAN: "dhcp"
- # TF_VAR_NTP: "ntp.ge.com ntp1.ge.com"
- # TF_VAR_SUBNET_LAN: "24"
- # TF_VAR_VM_NAME: pfsense-gateway-vm
- # name: variable-pfsense-gateway-vm-config
- # storageResourceType: Secret
- # -
- # data:
- # registries: |
- # - url: "harbor-ehl.spectrocloud.com"
- # type: docker
- # name: spectro-ehl-harbor
- # username: admin
- # password: "oGBi1Ej1zirN"
- # - url: "gcr.io"
- # type: docker
- # name: gcr
- # username: _json_key
- # jsonCred: ewogInR5cGUiOiAic2VydmljZV9hY2NvdW50IiwKICJwcm9qZWN0X2lkIjogInNwZWN0cm8taW1hZ2VzIiwKICJwcml2YXRlX2tleV9pZCI6ICI4NDdjMDkxOTBmY2NmYWM2OGZlOWJhZmI5ZTFmMWViZjE3MWM0ZWJjIiwKICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQ3F4UzF5YjlTdEx4VXJcbmM5STNtYkpvU3BaTVRuSW5HeGhjZ3dxb0UzTi9ieU9sQ1dINWx1VllEbVNlWWVHZFU4SFhOeXRLS1NsMXpGWkpcbmJROHVianNONDhtK1ZMQ0pjV2RMZXh4RWdFaS9PYld5QTFmdTl2eVRNVE9IOFBFOG5nT2h4Vkkya2RJZ0l6dC9cbk1EWDgrQnRTUGRzb3hMNEVkNHR5dkhTTU5vZlJ5T3BPem1lTndkRU91dDV4RHVPOFMrMnVzdWdQbUx1MUhQRWZcblBCcmU4M25YSFBrenV5dUxvOExnUm1vQUtDdE4zUjg2WUh3U0dxL1dzQzNrWDQ0a2g1YzlDTXVOTEFmZCszQjNcbjhkUW9nS0gxS3N3UVo4T1llcTdaYnFQcnNZdDFvb0p0Y25XaHZ1aHRqVWM3L3pIYWRSYVNQNFdtYmxyQTNoN1JcbmhLT0NBeFNWQWdNQkFBRUNnZ0VBQUs3V01XU3JzOFlDSVpEYlVLdG8vVmt2ME9TQ09QYlFyVG9LSXl1K1dBR2dcbmV5ckp2VGl5a2VsSTB5RzdTQ2l4c2FwWmhLdm8zd25WZDdXQ0RJajg3QkxNY1VDR0RqaERMNWYrV2FYSWZrd1dcbnNBbEVZdy9GbkRpd2M4VUhOc0cvYkN1ejg4VHE3UjFnanFNcGJCemY1K1hMb2VMT3ZvbWhEajNxR3B4QUJRRjFcbkc4MFJnMmFEbWordHR4N1Z1SlpGRzdMSTZ5MWxhcHFTcjMrNFEzblN0dEE4WElaSjIrWWlMdGNDTlhUNXN0aFZcblpJQk85cVltN0VrNmp1KzI0ZXBBSTBMZStsdCs4SEpybEEwL0VVZFdPeUdwbTJSeVpNR25Ga1ZSVkFwZUo1cDhcbktOQ0NiR0UzbHNFSkg5VWo3bHQwQ0hNcjdaOS9SVm9Kc3lzRjFrK0w0UUtCZ1FEd0V3MTdHWUZLeHpTYzNGdmFcbk1hditZb0hkTTdSenl1WUtyVjFoeUpXdlZnblFLOWtjWW95QXBOeHRXMkdIRDZxOXlacWVmS21za1F5YVRNOFVcbldoQmFFNlp1Sloybmhpczk3VzRMMnlMOXAzTWdzSUcwTkNQU1Q1cWlnalBIa2VqU0s5cEQ3Zzh3dCtObWx3NXNcbnBKbkxSRWlYRkVpS0RhVjh6T01hRmFjLzVRS0JnUUMyR1RNdngrNUlBQ1RwWWlkVEtsUU1QVm9sWFgwbEM3bjhcbnZkcW9TcnRmQjFOMEZRVWp5QnYyODlRREJxak1Fc3g0cHhQd3hPVXNRZUI3SERyK1pzNnp1WXBmVklrYTlDRUxcbnNkcE5OZEtMNnBZSVRpclYzQXB3WkZFNUIxV0hYRS94L1l3THQ3L0VQV2VVTXBtdWpYdjVwYXVNemtvT3hhdzZcbkZxMmtwQ1ZXOFFLQmdEbVQrRG03NWp4dkJubFFpRjhPWnJlK0hITlhBVUo4T1N2NlpGa2RhSGtDNDV1ZnlKUjhcbk5rSnNSOG1hand0N2dISEFObXNvYnZGS1lJRnBSb2tWQ0xMeDM0cXZUdDVzMlZZaTZhL0NRT1FoRXdXV0Q2V09cbms4RFBBc0ZUQk9RcVpISnp2Z2Rvd3cvbUtvM2lVTDFSeE9qeFdZK1NkOHRVZG9vMkFZbXlBOGpaQW9HQVVCSGFcbm1RcnZjb2RsczUxdUx6cHdoSVZCcXlabGpiR1BVVnZsMXBRdHVZWk9Jci9QWjhGbzBuaTZZNjQ4OEtiZmM3bjdcbm9hWGlJMGpBUFVIWGl0K0h5aTB1YythRzl1d2J6N21maE0zQ3k3VVZ6WWNaZkxucGVTd1RMdUZUc1ZaYzdNb1JcbkYySHZsNGxzanhTRTBPVHplMzFtWmw2YTM4MzVnZjFRMHhFK1BmRUNnWUVBdjd2ZkJLMS8yUEpvNFNlMkd4ZmFcbjM0SGNEWHFnVXNYWUV3dmdBWE8vcXpYQmlkRGtLK25wUjVKelhoTDREY0VSOUxIak9TdlRiTllubm42ZmVXc0JcbjJ5dW1tdS8zbmQ5eVh6bHhUNURRVlVoWHNtUmYwWVdCMU9nQTZFYjJybWYydzZ4SFVlQ3dWdlRLM2tvdndteWpcbnErK2pUY1RwVnI4L3BCMnVhU1BlTlQwPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICJjbGllbnRfZW1haWwiOiAic3BlY3Ryby1pbWFnZXMtdmlld2VyQHNwZWN0cm8taW1hZ2VzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICJjbGllbnRfaWQiOiAiMTE1ODMwNTM0MzcyOTU2MzQwNDUzIiwKICJhdXRoX3VyaSI6ICJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20vby9vYXV0aDIvYXV0aCIsCiAidG9rZW5fdXJpIjogImh0dHBzOi8vb2F1dGgyLmdvb2dsZWFwaXMuY29tL3Rva2VuIiwKICJhdXRoX3Byb3ZpZGVyX3g1MDlfY2VydF91cmwiOiAiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vb2F1dGgyL3YxL2NlcnRzIiwKICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L3NwZWN0cm8taW1hZ2VzLXZpZXdlciU0MHNwZWN0cm8taW1hZ2VzLmlhbS5nc2VydmljZWFjY291bnQuY29tIgp9
- # name: content-sync-identity
- # storageResourceType: Secret
- # edgePropertiesSpec:
- # properties:
- # cert.base64: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURvekNDQW91Z0F3SUJBZ0lRZU84WGxxQU1MaHh2dENhcDM1eWt0ekFOQmdrcWhraUc5dzBCQVFzRkFEQlMKTVFzd0NRWURWUVFHRXdKVlV6RWhNQjhHQTFVRUNoTVlSMlZ1WlhKaGJDQkZiR1ZqZEhKcFl5QkRiMjF3WVc1NQpNU0F3SGdZRFZRUURFeGRIUlNCRmVIUmxjbTVoYkNCU2IyOTBJRU5CSURJdU1UQWVGdzB4TlRBek1EVXdNREF3Ck1EQmFGdzB6TlRBek1EUXlNelU1TlRsYU1GSXhDekFKQmdOVkJBWVRBbFZUTVNFd0h3WURWUVFLRXhoSFpXNWwKY21Gc0lFVnNaV04wY21saklFTnZiWEJoYm5reElEQWVCZ05WQkFNVEYwZEZJRVY0ZEdWeWJtRnNJRkp2YjNRZwpRMEVnTWk0eE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBekN6VDR3TlJadHIyClhUem9UTWpwcGp1bFpmRzM1L25PdDQ0cTJ6ZzQ3c3h3Z1o4bzRxamNyd3pJaHNudG9GclJRc3NqWFNGNXFYZEMKenNtMUc3ZjA0cUVCaW11T0gvWCtDaWRXWCtzdWRDUzhWeVJqWGk5Y3l2VVc0L21ZS0NMWHY1TTZIaEVvSUhDRApYZG82eVVyNW1TcmYxOHFSUjN5VUZ6MEhZWG9wYTJMczNRNmxCdkVVTzJYdzA0dnFWdm1nMWg3UzVqWXVab3ZDCm9JYmQyKzRRR2RvU1pQZ3RTTnBDeFNSK053dFBwellacG1xaVV1REdmVnBPM0hVNDJBUEIwYzYwRDkxY0pobzYKdFpwWFlIRHNSL1J4WUdtMDJLL2lNR2VmRDVGNFlNcnRvS29IYnNrdHk2K3U1RlVPclVnR0FUSkpHdHhsZWc1WApLb3RRWXU4UDF3SURBUUFCbzNVd2N6QVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUNNQTRHQTFVZER3RUIvd1FFCkF3SUJCakF1QmdOVkhSRUVKekFscENNd0lURWZNQjBHQTFVRUF4TVdSMFV0VW05dmRDMURUMDB0VWxOQkxUSXcKTkRndE1UQWRCZ05WSFE0RUZnUVUzTjJtVUNKQkNMWWd0cFp5eEJlQk1Kd05adW93RFFZSktvWklodmNOQVFFTApCUUFEZ2dFQkFDRjRac2YyTm0wRnBWTmVBRFVIK3NsOG1GZ3dMN2RmTDcrNm43aE9nSDFaWGN2NnBEa29OdFZFCjBKL1pQZEhKVzZudGVkS0VadWl6RzVCQ2NsVUgzSXlZSzQvNEd4TnBGWHVnbVduS0d5MmZlWXdWYWU3UHV5ZDcKL2lLT0ZFR0NZeDRDNkUya3EzYUZqSnFpcTF2YmdTUy9CMGFndDFEM3JIM2kvK2RYVnh4OFpqaHlaTXVOK2NnUwpwWkw0Z25oblNYRkFHaXNzeEpoS3NOa1lndktkT0VUUk5uNWxFZ2ZnVnlQMmlPVnFFZ3VIazJHdTBnSFNvdUx1CjVhZC9xeU4rWmdiang4dkVXbHl3bWhYYjc4R2FmL0F3U0dBd1FQdG1RMDMxMGE0RHVsR3hvL2tjdVM3OHZGSDEKbXdKbUhtOUFJRm9xQmk4WHB1aEdtUTBudnltdXJFaz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
- # device.id: pftest-hospital-bm-3
- # device.ip: "192.168.100.12"
- # device.type: libvirt
- # http.proxy: "http://10.195.54.53:80"
- # https.proxy: "http://10.195.54.53:80"
- # hubble.port: ""
- # hubble.url: api.dev.spectrocloud.com
- # installation.mode: offline
- # libvirt.sock: /var/run/libvirt/libvirt-sock
- # libvirt.sshkey: libvirt.key
- # libvirt.sshuser: root
- # no.proxy: "localhost,127.0.0.1,3.70.0.0/16,192.168.0.0/16,10.0.0.0/8,.ge.com,192.168.100.1,11.0.0.0/22"
- # pod.subnet: 172.10.0.0/16
- # registry: harbor
- # service.subnet: 11.0.0.0/22
-
- ---
- apiVersion: v1
- data:
- user-data.tmpl: |
- #cloud-config
- write_files:
- - encoding: base64
- content: $${CONFIG_XML}
- owner: root:root
- path: /cf/conf/config.xml
- permissions: '0644'
- config-xml.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- $${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- $${DNS_1}
- $${DNS_2}
-
-
-
-
-
- vtnet1
-
- $${IP_ADDR_WAN}
-
-
- 32
-
-
-
-
-
-
-
- SavedCfg
-
-
-
-
-
-
-
- dhcp6
-
- 0
- wan
-
-
-
-
- vtnet0
-
-
- $${IP_ADDR_LAN}
- $${SUBNET_LAN}
-
-
-
-
-
-
-
- $${DHCP_RANGE_START}
- $${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- kind: ConfigMap
- metadata:
- name: pfsense-vm-files
- ---
- apiVersion: v1
- data:
- credentials: VVJJX1RFTVBMQVRFOiAicWVtdStzc2g6Ly8lc0Alcy9zeXN0ZW0/a2V5ZmlsZT0lcyZzb2NrZXQ9JXMma25vd25faG9zdHNfdmVyaWZ5PWlnbm9yZSI=
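-   # decodes to: URI_TEMPLATE: "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"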
- kind: Secret
- metadata:
- name: libvirt-account-creds
- type: Opaque
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Provider
- metadata:
- name: libvirt
- spec:
- provider: custom
- credentials:
- source: Secret
- secretRef:
- name: libvirt-account-creds
- key: credentials
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Configuration
- metadata:
- name: pfsense-gateway-vm-config
- spec:
- volumeSpec:
- volumeMounts:
- - name: libvirt
- mountPath: /var/run/libvirt/libvirt-sock
- - name: spectro-directory
- mountPath: /opt/spectrocloud
- - mountPath: "/var/files"
- name: files-vol
- volumes:
- - name: files-vol
- configMap:
- name: pfsense-vm-files
- - name: libvirt
- hostPath:
- path: /var/run/libvirt/libvirt-sock
- type: Socket
- - name: spectro-directory
- hostPath:
- path: /opt/spectrocloud
- type: Directory
- deleteResource: true
- variable:
- VM_NAME: "pfsense-gateway-vm"
- NTP: FROM_SECRET_REF
- DNS_1: FROM_SECRET_REF
- DNS_2: FROM_SECRET_REF
- IP_ADDR_WAN: FROM_SECRET_REF
- IP_ADDR_LAN: FROM_SECRET_REF
- SUBNET_LAN: FROM_SECRET_REF
- DHCP_RANGE_START: FROM_SECRET_REF
- DHCP_RANGE_END: FROM_SECRET_REF
- providerRef:
- name: libvirt
- hcl: |
- terraform {
- required_version = ">= 0.13"
- required_providers {
- libvirt = {
- source = "dmacvicar/libvirt"
- version = "0.6.14"
- }
- }
- }
-
- ##### VARIABLES #####
- variable "URI_TEMPLATE" {
- type = string
- default = "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"
- }
-
- variable "NTP" {
- type = string
- }
-
- variable "DNS_1" {
- type = string
- }
-
- variable "DNS_2" {
- type = string
- }
-
- variable "IP_ADDR_WAN" {
- type = string
- }
-
- variable "IP_ADDR_LAN" {
- type = string
- }
-
- variable "SUBNET_LAN" {
- type = string
- }
-
- variable "DHCP_RANGE_START" {
- type = string
- }
-
- variable "DHCP_RANGE_END" {
- type = string
- }
-
- variable "LIBVIRT_SOCKET" {
- type = string
- default = "/var/run/libvirt/libvirt-sock"
- }
-
- variable "VM_NAME" {
- type = string
- default = "pfsense-terraform"
- }
-
- ##### OUTPUT #####
- output "config_value" {
- value = data.template_file.config.rendered
- }
-
- output "user_data_value" {
- value = data.template_file.user_data.rendered
- }
-
- ##### PROVIDER #####
- provider "libvirt" {
- uri = "qemu:///system"
- #uri = format(var.URI_TEMPLATE, var.SSH_USER, var.HOST_IP, var.SSH_KEY, var.LIBVIRT_SOCKET)
- }
-
- data "template_file" "config" {
- template = file("/var/files/config-xml.tmpl")
- vars = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
- }
-
- data "template_file" "user_data" {
- template = file("/var/files/user-data.tmpl")
- vars = {
- CONFIG_XML = base64encode(data.template_file.config.rendered)
- }
- }
-
- resource "libvirt_pool" "ubuntu" {
- name = "ubuntuop"
- type = "dir"
- path = "/var/lib/libvirt/terraform-provider-libvirt-pool-ubuntuoperator"
- }
-
- resource "libvirt_volume" "ubuntu-qcow2" {
- name = "ubuntu-qcow2"
- pool = libvirt_pool.ubuntu.name
- source = "/opt/spectrocloud/vm-operator/pfsense-cloudinit-base-gehc-2.5.1-SC.qcow2"
- format = "qcow2"
- }
-
- resource "libvirt_cloudinit_disk" "commoninit" {
- name = "commoninit.iso"
- user_data = data.template_file.user_data.rendered
- pool = libvirt_pool.ubuntu.name
- }
-
- resource "libvirt_domain" "domain-ubuntu" {
- name = var.VM_NAME
- memory = "2048"
- vcpu = 1
-
- cloudinit = libvirt_cloudinit_disk.commoninit.id
-
- network_interface {
- bridge = "br0"
- }
-
- network_interface {
- bridge = "br1"
- }
-
- console {
- type = "pty"
- target_port = "0"
- target_type = "serial"
- }
-
- disk {
- volume_id = libvirt_volume.ubuntu-qcow2.id
- }
-
- graphics {
- type = "vnc"
- listen_type = "address"
- listen_address = "0.0.0.0"
- autoport = true
- }
- }
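As in the vSphere variant, the HCL above renders `config-xml.tmpl`, base64-encodes the result, and embeds it in the cloud-init `user-data`, which cloud-init decodes back to `/cf/conf/config.xml` on first boot. A minimal sketch of just that encode-and-embed step, with a stand-in string where the rendered pfSense config would go:

    locals {
      # Stand-in for data.template_file.config.rendered
      config_xml = "<pfsense><version>21.5</version></pfsense>"

      user_data = <<-EOT
        #cloud-config
        write_files:
          - encoding: base64
            content: ${base64encode(local.config_xml)}
            owner: root:root
            path: /cf/conf/config.xml
            permissions: '0644'
      EOT
    }

    output "user_data" {
      value = local.user_data
    }

The doubled `$${…}` forms in the ConfigMap are escapes, presumably so an earlier templating pass leaves plain `${…}` behind for `template_file` to substitute.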
diff --git a/examples/libvirt/config/profile/profile1-export-full.yaml b/examples/libvirt/config/profile/profile1-export-full.yaml
deleted file mode 100644
index 73704c1..0000000
--- a/examples/libvirt/config/profile/profile1-export-full.yaml
+++ /dev/null
@@ -1,1153 +0,0 @@
-name: herculus-cp NC_test
-uid: 61e99a11b7e45ea188c87c3a
-description: ""
-type: cluster
-cloudType: libvirt
-packs:
- - name: centos-libvirt
- type: spectro
- layer: os
- registry_uid: 61b9a3180c4cf13de4695bbf
- registry: Public Repo
- version: "7.9"
- tag: "7.9"
- values: |-
- # Spectro Golden images include most of the hardening standards recommended by the CIS benchmark v1.5
-
- # Uncomment the section below to:
- # 1. include custom files to be copied over to the nodes, and/or
- # 2. execute a list of commands before or after kubeadm init/join runs
- #
- #kubeadmconfig:
- # preKubeadmCommands:
- # - echo "Executing pre kube admin config commands"
- # - update-ca-certificates
- # - 'systemctl restart containerd; sleep 3'
- # - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done'
- # postKubeadmCommands:
- # - echo "Executing post kube admin config commands"
- # files:
- # - targetPath: /usr/local/share/ca-certificates/mycom.crt
- # targetOwner: "root:root"
- # targetPermissions: "0644"
- # content: |
- # -----BEGIN CERTIFICATE-----
- # MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl
- # cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE
- # AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA
- # nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz
- # qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN
- # fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2
- # 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL
- # 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK
- # jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB
- # /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki
- # HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y
- # g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ
- # ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6
- # b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56
- # IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc=
- # -----END CERTIFICATE-----
- - name: kubernetes-konvoy
- type: spectro
- layer: k8s
- registry_uid: 61b9a3180c4cf13de4695bbf
- registry: Public Repo
- version: 1.21.6
- tag: 1.21.6
- values: |-
- # spectrocloud.com/enabled-presets: Kube Controller Manager:loopback-ctrlmgr,Kube Scheduler:loopback-scheduler
- pack:
- k8sHardening: True
- #CIDR Range for Pods in cluster
- # Note : This must not overlap with any of the host or service network
- podCIDR: "192.168.0.0/16"
- #CIDR notation IP range from which to assign service cluster IPs
- # Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: "10.96.0.0/12"
-
- # KubeAdm customization for Kubernetes hardening. The config below is ignored if the k8sHardening property above is disabled
- kubeadmconfig:
- apiServer:
- extraArgs:
- # Note : secure-port flag is used during kubeadm init. Do not change this flag on a running cluster
- secure-port: "6443"
- anonymous-auth: "true"
- insecure-port: "0"
- profiling: "false"
- disable-admission-plugins: "AlwaysAdmit"
- default-not-ready-toleration-seconds: "60"
- default-unreachable-toleration-seconds: "60"
- enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy"
- audit-log-path: /var/log/apiserver/audit.log
- audit-policy-file: /etc/kubernetes/audit-policy.yaml
- audit-log-maxage: "30"
- audit-log-maxbackup: "10"
- audit-log-maxsize: "100"
- authorization-mode: RBAC,Node
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- extraVolumes:
- - name: audit-log
- hostPath: /var/log/apiserver
- mountPath: /var/log/apiserver
- pathType: DirectoryOrCreate
- - name: audit-policy
- hostPath: /etc/kubernetes/audit-policy.yaml
- mountPath: /etc/kubernetes/audit-policy.yaml
- readOnly: true
- pathType: File
- controllerManager:
- extraArgs:
- profiling: "false"
- terminated-pod-gc-threshold: "25"
- pod-eviction-timeout: "1m0s"
- use-service-account-credentials: "true"
- feature-gates: "RotateKubeletServerCertificate=true"
- scheduler:
- extraArgs:
- profiling: "false"
- kubeletExtraArgs:
- read-only-port : "0"
- event-qps: "0"
- feature-gates: "RotateKubeletServerCertificate=true"
- protect-kernel-defaults: "true"
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- files:
- - path: hardening/audit-policy.yaml
- targetPath: /etc/kubernetes/audit-policy.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/privileged-psp.yaml
- targetPath: /etc/kubernetes/hardening/privileged-psp.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/90-kubelet.conf
- targetPath: /etc/sysctl.d/90-kubelet.conf
- targetOwner: "root:root"
- targetPermissions: "0600"
- preKubeadmCommands:
- # For enabling the 'protect-kernel-defaults' kubelet flag, kernel parameter changes are required
- - 'echo "====> Applying kernel parameters for Kubelet"'
- - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
- postKubeadmCommands:
- # Apply the privileged PodSecurityPolicy on the first master node; otherwise, CNI (and other) pods won't come up
- - 'export KUBECONFIG=/etc/kubernetes/admin.conf'
- # The API server sometimes takes a little longer to respond; retry if applying the PodSecurityPolicy manifest fails
- - '[ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"'
-
- # Client configuration to add OIDC based authentication flags in kubeconfig
- #clientConfig:
- #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
- #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
- #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- #oidc-extra-scope: profile,email
- - name: cni-calico
- type: spectro
- layer: cni
- registry_uid: 61b9a3180c4cf13de4695bbf
- registry: Public Repo
- version: 3.19.0
- tag: 3.19.0
- values: |-
- manifests:
- calico:
-
- # IPAM type to use. Supported types are calico-ipam, host-local
- ipamType: "calico-ipam"
-
- # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN
- encapsulationType: "CALICO_IPV4POOL_IPIP"
-
- # Should be one of Always, CrossSubnet, Never
- encapsulationMode: "Always"
- - name: csi-rook-ceph
- type: spectro
- layer: csi
- registry_uid: 61b9a3180c4cf13de4695bbf
- registry: Public Repo
- version: 1.8.0
- tag: 1.8.0
- values: |-
- # spectrocloud.com/enabled-presets: Storage - Block Devices:multi-node-block
- manifests:
- storageclass:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephFilesystem
- metadata:
- name: myfs
- namespace: rook-ceph # namespace:cluster
- spec:
- # The metadata pool spec. Must use replication.
- metadataPool:
- replicated:
- size: 3
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # The list of data pool specs. Can use replication or erasure coding.
- dataPools:
- - name: replicated
- failureDomain: host
- replicated:
- size: 3
- # Disallow setting pool with replica 1, this could lead to data loss without recovery.
- # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # Whether to preserve filesystem after CephFilesystem CRD deletion
- preserveFilesystemOnDelete: true
- # The metadata service (mds) configuration
- metadataServer:
- # The number of active MDS instances
- activeCount: 1
- # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
- # If false, standbys will be available, but will not have a warm cache.
- activeStandby: true
- # The affinity rules to apply to the mds deployment
- placement:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - mds-node
- # topologySpreadConstraints:
- # tolerations:
- # - key: mds-node
- # operator: Exists
- # podAffinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: kubernetes.io/hostname will place MDS across different hosts
- topologyKey: kubernetes.io/hostname
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: */zone can be used to spread MDS across different AZ
- # Use topologyKey: failure-domain.beta.kubernetes.io/zone if your k8s cluster is v1.16 or lower
- # Use topologyKey: topology.kubernetes.io/zone if your k8s cluster is v1.17 or newer
- topologyKey: topology.kubernetes.io/zone
- # A key/value list of annotations
- annotations:
- # key: value
- # A key/value list of labels
- labels:
- # key: value
- resources:
- # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # priorityClassName: my-priority-class
- # Filesystem mirroring settings
- # mirroring:
- # enabled: true
- # list of Kubernetes Secrets containing the peer token
- # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
- # peers:
- #secretNames:
- #- secondary-cluster-peer
- # specify the schedule(s) on which snapshots should be taken
- # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
- # snapshotSchedules:
- # - path: /
- # interval: 24h # daily snapshots
- # startTime: 11:55
- # manage retention policies
- # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
- # snapshotRetention:
- # - path: /
- # duration: "h 24"
- ---
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: spectro-storage-class
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
- # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
- provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
- parameters:
- # clusterID is the namespace where the rook cluster is running
- # If you change this namespace, also change the namespace below where the secret namespaces are defined
- clusterID: rook-ceph # namespace:cluster
-
- # CephFS filesystem name into which the volume shall be created
- fsName: myfs
-
- # Ceph pool into which the volume shall be created
- # Required for provisionVolume: "true"
- pool: myfs-data0
-
- # The secrets contain Ceph admin credentials. These are generated automatically by the operator
- # in the same namespace as the cluster.
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
- csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
-
- # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
- # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
- # or by setting the default mounter explicitly via --volumemounter command-line argument.
- # mounter: kernel
- reclaimPolicy: Delete
- allowVolumeExpansion: true
- #Supported binding modes are Immediate, WaitForFirstConsumer
- volumeBindingMode: "WaitForFirstConsumer"
- mountOptions:
- # uncomment the following line for debugging
- #- debug
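-            # Hedged usage sketch (hypothetical claim, not part of the original profile):
-            # a workload binds to this class via a PersistentVolumeClaim such as:
-            #   apiVersion: v1
-            #   kind: PersistentVolumeClaim
-            #   metadata:
-            #     name: demo-cephfs-pvc
-            #   spec:
-            #     storageClassName: spectro-storage-class
-            #     accessModes: ["ReadWriteMany"]
-            #     resources:
-            #       requests:
-            #         storage: 1Gi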
-
- cluster:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephCluster
- metadata:
- name: rook-ceph
- namespace: rook-ceph # namespace:cluster
- spec:
- cephVersion:
- # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v15 is octopus, and v16 is pacific.
- # RECOMMENDATION: In production, use a specific version tag instead of the general v16 tag, which pulls the latest release and could result in different
- # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
- # If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.7-20211208
- # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
- image: quay.io/ceph/ceph:v16.2.7
- # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
- # Future versions such as `quincy` would require this to be set to `true`.
- # Do not set to true in production.
- allowUnsupported: false
- # The path on the host where configuration files will be persisted. Must be specified.
- # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
- # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
- dataDirHostPath: /var/lib/rook
- # Whether or not upgrade should continue even if a check fails
- # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
- # Use at your OWN risk
- # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
- skipUpgradeChecks: false
- # Whether or not continue if PGs are not clean during an upgrade
- continueUpgradeAfterChecksEvenIfNotHealthy: false
- # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
- # If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
- # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
- # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
- # The default wait timeout is 10 minutes.
- waitTimeoutForHealthyOSDInMinutes: 10
- mon:
- # Set the number of mons to be started. Generally recommended to be 3.
- # For highest availability, an odd number of mons should be specified.
- count: 3
- # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
- # Mons should only be allowed on the same node for test environments where data loss is acceptable.
- allowMultiplePerNode: false
- mgr:
- # When higher availability of the mgr is needed, increase the count to 2.
- # In that case, one mgr will be active and one in standby. When Ceph updates which
- # mgr is active, Rook will update the mgr services to match the active mgr.
- count: 1
- modules:
- # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
- # are already enabled by other settings in the cluster CR.
- - name: pg_autoscaler
- enabled: true
- # enable the ceph dashboard for viewing cluster status
- dashboard:
- enabled: true
- # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
- # urlPrefix: /ceph-dashboard
- # serve the dashboard at the given port.
- # port: 8443
- # serve the dashboard using SSL
- ssl: true
- # enable prometheus alerting for cluster
- monitoring:
- # requires Prometheus to be pre-installed
- enabled: false
- # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
- # Recommended:
- # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
- # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
- # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
- rulesNamespace: rook-ceph
- network:
- # enable host networking
- #provider: host
- # enable the Multus network provider
- #provider: multus
- #selectors:
- # The selector keys are required to be `public` and `cluster`.
- # Based on the configuration, the operator will do the following:
- # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
- # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
- #
- # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
- #
- #public: public-conf --> NetworkAttachmentDefinition object name in Multus
- #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
- # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
- #ipFamily: "IPv6"
- # Ceph daemons to listen on both IPv4 and IPv6 networks
- #dualStack: false
- # enable the crash collector for ceph daemon crash collection
- crashCollector:
- disable: false
- # Uncomment daysToRetain to prune ceph crash entries older than the
- # specified number of days.
- #daysToRetain: 30
- # enable the log collector; daemons will log to files and rotate
- # logCollector:
- # enabled: true
- # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
- # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
- cleanupPolicy:
- # Since cluster cleanup is destructive to data, confirmation is required.
- # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
- # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
- # Rook will immediately stop configuring the cluster and only wait for the delete command.
- # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
- confirmation: ""
- # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
- sanitizeDisks:
- # method indicates if the entire disk should be sanitized or simply ceph's metadata
- # in both cases, re-install is possible
- # possible choices are 'complete' or 'quick' (default)
- method: quick
- # dataSource indicate where to get random bytes from to write on the disk
- # possible choices are 'zero' (default) or 'random'
- # using random sources will consume entropy from the system and will take much more time than the zero source
- dataSource: zero
- # iteration overwrite N times instead of the default (1)
- # takes an integer value
- iteration: 1
- # allowUninstallWithVolumes defines how the uninstall should be performed
- # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
- allowUninstallWithVolumes: false
- # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
- # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
- # tolerate taints with a key of 'storage-node'.
- # placement:
- # all:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - storage-node
- # podAffinity:
- # podAntiAffinity:
- # topologySpreadConstraints:
- # tolerations:
- # - key: storage-node
- # operator: Exists
- # The above placement information can also be specified for mon, osd, and mgr components
- # mon:
- # Monitor deployments may contain an anti-affinity rule for avoiding monitor
- # collocation on the same node. This is a required rule when host network is used
- # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
- # preferred rule with weight: 50.
- # osd:
- # mgr:
- # cleanup:
- annotations:
- # all:
- # mon:
- # osd:
- # cleanup:
- # prepareosd:
- # If no mgr annotations are set, prometheus scrape annotations will be set by default.
- # mgr:
- labels:
- # all:
- # mon:
- # osd:
- # cleanup:
- # mgr:
- # prepareosd:
- # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
- # These labels can be passed as LabelSelector to Prometheus
- # monitoring:
- # crashcollector:
- resources:
- # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
- # mgr:
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # The above example requests/limits can also be added to the other components
- # mon:
- # osd:
- # For OSD it also is a possible to specify requests/limits based on device class
- # osd-hdd:
- # osd-ssd:
- # osd-nvme:
- # prepareosd:
- # mgr-sidecar:
- # crashcollector:
- # logcollector:
- # cleanup:
- # The option to automatically remove OSDs that are out and are safe to destroy.
- removeOSDsIfOutAndSafeToRemove: true
- # priorityClassNames:
- # all: rook-ceph-default-priority-class
- # mon: rook-ceph-mon-priority-class
- # osd: rook-ceph-osd-priority-class
- # mgr: rook-ceph-mgr-priority-class
- storage: # cluster level storage configuration and selection
- useAllNodes: true
- useAllDevices: true
- #deviceFilter:
- config:
- # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
- # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
- # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
- # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
- # osdsPerDevice: "1" # this value can be overridden at the node or device level
- # encryptedDevice: "true" # the default value for this option is "false"
- # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
- # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
- # nodes:
- # - name: "172.17.4.201"
- # devices: # specific devices to use for storage can be specified for each node
- # - name: "sdb"
- # - name: "nvme01" # multiple osds can be created on high performance devices
- # config:
- # osdsPerDevice: "5"
- # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
- # config: # configuration can be specified at the node level which overrides the cluster level config
- # - name: "172.17.4.301"
- # deviceFilter: "^sd."
- # when onlyApplyOSDPlacement is false, the operator will merge both placement.All() and placement.osd
- onlyApplyOSDPlacement: false
- # The section for configuring management of daemon disruptions during upgrade or fencing.
- disruptionManagement:
- # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
- # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
- # block eviction of OSDs by default and unblock them safely when drains are detected.
- managePodBudgets: true
- # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
- # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
- osdMaintenanceTimeout: 30
- # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
- # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
- # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
- pgHealthCheckTimeout: 0
- # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
- # Only available on OpenShift.
- manageMachineDisruptionBudgets: false
- # Namespace in which to watch for the MachineDisruptionBudgets.
- machineDisruptionBudgetNamespace: openshift-machine-api
-
- # healthChecks
- # Valid values for daemons are 'mon', 'osd', 'status'
- healthCheck:
- daemonHealth:
- mon:
- disabled: false
- interval: 45s
- osd:
- disabled: false
- interval: 60s
- status:
- disabled: false
- interval: 60s
- # Change pod liveness probe, it works for all mon,mgr,osd daemons
- livenessProbe:
- mon:
- disabled: false
- mgr:
- disabled: false
- osd:
- disabled: false
-
- operator:
- contents: |
-
- # Rook Ceph Operator Config ConfigMap
- # Use this ConfigMap to override Rook-Ceph Operator configurations.
- # NOTE! Precedence will be given to this config if the same Env Var config also exists in the
- # Operator Deployment.
- # To move a configuration(s) from the Operator Deployment to this ConfigMap, add the config
- # here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
-
- kind: ConfigMap
- apiVersion: v1
- metadata:
- name: rook-ceph-operator-config
- # should be in the namespace of the operator
- namespace: rook-ceph # namespace:operator
- data:
- # The logging level for the operator: ERROR | WARNING | INFO | DEBUG
- ROOK_LOG_LEVEL: "INFO"
-
- # Enable the CSI driver.
- # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
- ROOK_CSI_ENABLE_CEPHFS: "true"
- # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
- ROOK_CSI_ENABLE_RBD: "true"
- ROOK_CSI_ENABLE_GRPC_METRICS: "false"
-
- # Set to true to enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
- # in some network configurations where the SDN does not provide access to an external cluster or
- # there is a significant drop in read/write performance.
- # CSI_ENABLE_HOST_NETWORK: "true"
-
- # Set logging level for csi containers.
- # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
- # CSI_LOG_LEVEL: "0"
-
- # Set replicas for csi provisioner deployment.
- CSI_PROVISIONER_REPLICAS: "2"
-
- # OMAP generator will generate the omap mapping between the PV name and the RBD image.
- # CSI_ENABLE_OMAP_GENERATOR needs to be enabled when using the RBD mirroring feature.
- # By default the OMAP generator sidecar is deployed with the CSI provisioner pod; to disable
- # it, set this to false.
- # CSI_ENABLE_OMAP_GENERATOR: "false"
-
- # set to false to disable deployment of snapshotter container in CephFS provisioner pod.
- CSI_ENABLE_CEPHFS_SNAPSHOTTER: "true"
-
- # set to false to disable deployment of snapshotter container in RBD provisioner pod.
- CSI_ENABLE_RBD_SNAPSHOTTER: "true"
-
- # Enable cephfs kernel driver instead of ceph-fuse.
- # If you disable the kernel client, your application may be disrupted during upgrade.
- # See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html
- # NOTE! cephfs quota is not supported in kernel version < 4.17
- CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
-
- # (Optional) policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
- # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
- CSI_RBD_FSGROUPPOLICY: "ReadWriteOnceWithFSType"
-
- # (Optional) policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
- # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
- CSI_CEPHFS_FSGROUPPOLICY: "None"
-
- # (Optional) Allow starting unsupported ceph-csi image
- ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
-
- # (Optional) control the host mount of /etc/selinux for csi plugin pods.
- CSI_PLUGIN_ENABLE_SELINUX_HOST_MOUNT: "false"
-
- # The default version of CSI supported by Rook will be started. To change the version
- # of the CSI driver to something other than what is officially supported, change
- # these images to the desired release of the CSI driver.
- # ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0"
- # ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0"
- # ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0"
- # ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0"
- # ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0"
- # ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.3.0"
-
- # (Optional) set user created priorityclassName for csi plugin pods.
- # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
-
- # (Optional) set user created priorityclassName for csi provisioner pods.
- # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
-
- # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
- # Default value is RollingUpdate.
- # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
- # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
- # Default value is RollingUpdate.
- # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
-
- # Kubelet directory path, if kubelet is configured to use a path other than /var/lib/kubelet.
- # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
-
- # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
- # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
- # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
- # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
-
- # (Optional) CephCSI provisioner NodeAffinity (applied to both the CephFS and RBD provisioners).
- # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
- # (Optional) CephCSI provisioner tolerations list (applied to both the CephFS and RBD provisioners).
- # List here, in YAML format, the taints you want to tolerate.
- # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
- # CSI_PROVISIONER_TOLERATIONS: |
- # - effect: NoSchedule
- # key: node-role.kubernetes.io/controlplane
- # operator: Exists
- # - effect: NoExecute
- # key: node-role.kubernetes.io/etcd
- # operator: Exists
- # (Optional) CephCSI plugin NodeAffinity (applied to both the CephFS and RBD plugins).
- # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
- # (Optional) CephCSI plugin tolerations list (applied to both the CephFS and RBD plugins).
- # List here, in YAML format, the taints you want to tolerate.
- # CSI plugins need to run on every node where clients mount the storage.
- # CSI_PLUGIN_TOLERATIONS: |
- # - effect: NoSchedule
- # key: node-role.kubernetes.io/controlplane
- # operator: Exists
- # - effect: NoExecute
- # key: node-role.kubernetes.io/etcd
- # operator: Exists
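- # As a worked example, the affinity/toleration pair above would match a node
- # labeled and tainted like the following sketch (the node name is an
- # assumption; the label and taint keys mirror the commented examples above):
- # apiVersion: v1
- # kind: Node
- # metadata:
- #   name: storage-node-1
- #   labels:
- #     role: storage-node
- # spec:
- #   taints:
- #     - key: node-role.kubernetes.io/etcd
- #       effect: NoExecute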
-
- # (Optional) CephCSI RBD provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
- # CSI_RBD_PROVISIONER_NODE_AFFINITY: "role=rbd-node"
- # (Optional) CephCSI RBD provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
- # List here, in YAML format, the taints you want to tolerate.
- # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
- # CSI_RBD_PROVISIONER_TOLERATIONS: |
- # - key: node.rook.io/rbd
- # operator: Exists
- # (Optional) CephCSI RBD plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
- # CSI_RBD_PLUGIN_NODE_AFFINITY: "role=rbd-node"
- # (Optional) CephCSI RBD plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
- # List here, in YAML format, the taints you want to tolerate.
- # CSI plugins need to run on every node where clients mount the storage.
- # CSI_RBD_PLUGIN_TOLERATIONS: |
- # - key: node.rook.io/rbd
- # operator: Exists
-
- # (Optional) CephCSI CephFS provisioner NodeAffinity (if specified, overrides CSI_PROVISIONER_NODE_AFFINITY).
- # CSI_CEPHFS_PROVISIONER_NODE_AFFINITY: "role=cephfs-node"
- # (Optional) CephCSI CephFS provisioner tolerations list (if specified, overrides CSI_PROVISIONER_TOLERATIONS).
- # List here, in YAML format, the taints you want to tolerate.
- # The CSI provisioner is best started on the same nodes as the other Ceph daemons.
- # CSI_CEPHFS_PROVISIONER_TOLERATIONS: |
- # - key: node.rook.io/cephfs
- # operator: Exists
- # (Optional) CephCSI CephFS plugin NodeAffinity (if specified, overrides CSI_PLUGIN_NODE_AFFINITY).
- # CSI_CEPHFS_PLUGIN_NODE_AFFINITY: "role=cephfs-node"
- # (Optional) CephCSI CephFS plugin tolerations list (if specified, overrides CSI_PLUGIN_TOLERATIONS).
- # List here, in YAML format, the taints you want to tolerate.
- # CSI plugins need to run on every node where clients mount the storage.
- # CSI_CEPHFS_PLUGIN_TOLERATIONS: |
- # - key: node.rook.io/cephfs
- # operator: Exists
-
- # (Optional) CephCSI RBD provisioner resource requirements. List here the resource
- # requests and limits you want to apply to the provisioner pod.
- # CSI_RBD_PROVISIONER_RESOURCE: |
- # - name : csi-provisioner
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-resizer
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-attacher
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-snapshotter
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-rbdplugin
- # resource:
- # requests:
- # memory: 512Mi
- # cpu: 250m
- # limits:
- # memory: 1Gi
- # cpu: 500m
- # - name : liveness-prometheus
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 50m
- # limits:
- # memory: 256Mi
- # cpu: 100m
- # (Optional) CephCSI RBD plugin resource requirements. List here the resource
- # requests and limits you want to apply to the plugin pod.
- # CSI_RBD_PLUGIN_RESOURCE: |
- # - name : driver-registrar
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 50m
- # limits:
- # memory: 256Mi
- # cpu: 100m
- # - name : csi-rbdplugin
- # resource:
- # requests:
- # memory: 512Mi
- # cpu: 250m
- # limits:
- # memory: 1Gi
- # cpu: 500m
- # - name : liveness-prometheus
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 50m
- # limits:
- # memory: 256Mi
- # cpu: 100m
- # (Optional) CephCSI CephFS provisioner resource requirements. List here the resource
- # requests and limits you want to apply to the provisioner pod.
- # CSI_CEPHFS_PROVISIONER_RESOURCE: |
- # - name : csi-provisioner
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-resizer
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-attacher
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 100m
- # limits:
- # memory: 256Mi
- # cpu: 200m
- # - name : csi-cephfsplugin
- # resource:
- # requests:
- # memory: 512Mi
- # cpu: 250m
- # limits:
- # memory: 1Gi
- # cpu: 500m
- # - name : liveness-prometheus
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 50m
- # limits:
- # memory: 256Mi
- # cpu: 100m
- # (Optional) CephCSI CephFS plugin resource requirements. List here the resource
- # requests and limits you want to apply to the plugin pod.
- # CSI_CEPHFS_PLUGIN_RESOURCE: |
- # - name : driver-registrar
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 50m
- # limits:
- # memory: 256Mi
- # cpu: 100m
- # - name : csi-cephfsplugin
- # resource:
- # requests:
- # memory: 512Mi
- # cpu: 250m
- # limits:
- # memory: 1Gi
- # cpu: 500m
- # - name : liveness-prometheus
- # resource:
- # requests:
- # memory: 128Mi
- # cpu: 50m
- # limits:
- # memory: 256Mi
- # cpu: 100m
-
- # Configure the CSI CephFS gRPC and liveness metrics ports
- # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
- # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
- # Configure the CSI RBD gRPC and liveness metrics ports
- # CSI_RBD_GRPC_METRICS_PORT: "9090"
- # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
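- # If the ports above are enabled, a Prometheus scrape job could target them
- # roughly as follows (a sketch only; the job name and target address are
- # placeholders, not names defined by Rook):
- # scrape_configs:
- #   - job_name: csi-rbd-liveness
- #     static_configs:
- #       - targets: ['<csi-plugin-pod-ip>:9080']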
-
- # Whether the OBC provisioner should watch the operator namespace; if not, the namespace of the cluster will be used.
- ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
-
- # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
- # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
- ROOK_ENABLE_DISCOVERY_DAEMON: "false"
- # The timeout value (in seconds) of Ceph commands. It should be >= 1. If this variable is not set or is invalid, it defaults to 15.
- ROOK_CEPH_COMMANDS_TIMEOUT_SECONDS: "15"
- # Enable the volume replication controller.
- # Before enabling, ensure the Volume Replication CRDs are created.
- # See https://rook.io/docs/rook/latest/ceph-csi-drivers.html#rbd-mirroring
- CSI_ENABLE_VOLUME_REPLICATION: "false"
- # CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0"
- ---
- # OLM: BEGIN OPERATOR DEPLOYMENT
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: rook-ceph-operator
- namespace: rook-ceph # namespace:operator
- labels:
- operator: rook
- storage-backend: ceph
- app.kubernetes.io/name: rook-ceph
- app.kubernetes.io/instance: rook-ceph
- app.kubernetes.io/component: rook-ceph-operator
- app.kubernetes.io/part-of: rook-ceph-operator
- spec:
- selector:
- matchLabels:
- app: rook-ceph-operator
- replicas: 1
- template:
- metadata:
- labels:
- app: rook-ceph-operator
- spec:
- serviceAccountName: rook-ceph-system
- containers:
- - name: rook-ceph-operator
- image: rook/ceph:v1.8.0
- args: ["ceph", "operator"]
- securityContext:
- runAsNonRoot: true
- runAsUser: 2016
- runAsGroup: 2016
- volumeMounts:
- - mountPath: /var/lib/rook
- name: rook-config
- - mountPath: /etc/ceph
- name: default-config-dir
- - mountPath: /etc/webhook
- name: webhook-cert
- ports:
- - containerPort: 9443
- name: https-webhook
- protocol: TCP
- env:
- # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
- # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
- - name: ROOK_CURRENT_NAMESPACE_ONLY
- value: "false"
- # Rook Discover toleration. Will tolerate all taints with all keys.
- # Choose between NoSchedule, PreferNoSchedule and NoExecute:
- # - name: DISCOVER_TOLERATION
- # value: "NoSchedule"
- # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
- # - name: DISCOVER_TOLERATION_KEY
- # value: ""
- # (Optional) Rook Discover tolerations list. List here, in YAML format, the taints you want to tolerate.
- # - name: DISCOVER_TOLERATIONS
- # value: |
- # - effect: NoSchedule
- # key: node-role.kubernetes.io/controlplane
- # operator: Exists
- # - effect: NoExecute
- # key: node-role.kubernetes.io/etcd
- # operator: Exists
- # (Optional) Rook Discover priority class name to set on the pod(s)
- # - name: DISCOVER_PRIORITY_CLASS_NAME
- # value: ""
- # (Optional) Discover Agent NodeAffinity.
- # - name: DISCOVER_AGENT_NODE_AFFINITY
- # value: "role=storage-node; storage=rook, ceph"
- # (Optional) Discover Agent Pod Labels.
- # - name: DISCOVER_AGENT_POD_LABELS
- # value: "key1=value1,key2=value2"
-
- # The duration between discovering devices in the rook-discover daemonset.
- - name: ROOK_DISCOVER_DEVICES_INTERVAL
- value: "60m"
-
- # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
- # Set this to true if SELinux is enabled (e.g. OpenShift) to work around the anyuid issues.
- # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
- - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
- value: "false"
-
- # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
- # Disable it here if you have similar issues.
- # For more details see https://github.com/rook/rook/issues/2417
- - name: ROOK_ENABLE_SELINUX_RELABELING
- value: "true"
-
- # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues.
- # For more details see https://github.com/rook/rook/issues/2254
- - name: ROOK_ENABLE_FSGROUP
- value: "true"
-
- # Disable automatic orchestration when new devices are discovered
- - name: ROOK_DISABLE_DEVICE_HOTPLUG
- value: "false"
-
- # Provide custom regexes as a comma-separated list. For example, a regex for RBD-based volumes would be "(?i)rbd[0-9]+".
- # If there is more than one regex, separate them with commas.
- # The default regex list is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
- # Append a regex after a comma to blacklist an additional disk
- # If the value is empty, the default regexes will be used.
- - name: DISCOVER_DAEMON_UDEV_BLACKLIST
- value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
-
- # Time to wait until the node controller will move Rook pods to other
- # nodes after detecting an unreachable node.
- # Pods affected by this setting are:
- # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
- # The value used in this variable replaces the default value of 300 secs
- # added automatically by k8s as Toleration for
- # <node.kubernetes.io/unreachable>
- # The total amount of time to reschedule Rook pods in healthy nodes
- # before detecting a <not ready node> condition will be the sum of:
- # --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
- # --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
- - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
- value: "5"
-
- # The name of the node to pass with the downward API
- - name: NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- # The pod name to pass with the downward API
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- # The pod namespace to pass with the downward API
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- # Recommended resource requests and limits, if desired
- #resources:
- # limits:
- # cpu: 500m
- # memory: 256Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
- # Uncomment it to run lib bucket provisioner in multithreaded mode
- #- name: LIB_BUCKET_PROVISIONER_THREADS
- # value: "5"
-
- # Uncomment it to run rook operator on the host network
- #hostNetwork: true
- volumes:
- - name: rook-config
- emptyDir: {}
- - name: default-config-dir
- emptyDir: {}
- - name: webhook-cert
- emptyDir: {}
- # OLM: END OPERATOR DEPLOYMENT
-
- toolbox:
- contents: |
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: rook-ceph-tools
- namespace: rook-ceph # namespace:cluster
- labels:
- app: rook-ceph-tools
- spec:
- replicas: 1
- selector:
- matchLabels:
- app: rook-ceph-tools
- template:
- metadata:
- labels:
- app: rook-ceph-tools
- spec:
- dnsPolicy: ClusterFirstWithHostNet
- containers:
- - name: rook-ceph-tools
- image: rook/ceph:v1.8.4
- command: ["/bin/bash"]
- args: ["-m", "-c", "/usr/local/bin/toolbox.sh"]
- imagePullPolicy: IfNotPresent
- tty: true
- securityContext:
- runAsNonRoot: true
- runAsUser: 2016
- runAsGroup: 2016
- env:
- - name: ROOK_CEPH_USERNAME
- valueFrom:
- secretKeyRef:
- name: rook-ceph-mon
- key: ceph-username
- - name: ROOK_CEPH_SECRET
- valueFrom:
- secretKeyRef:
- name: rook-ceph-mon
- key: ceph-secret
- volumeMounts:
- - mountPath: /etc/ceph
- name: ceph-config
- - name: mon-endpoint-volume
- mountPath: /etc/rook
- volumes:
- - name: mon-endpoint-volume
- configMap:
- name: rook-ceph-mon-endpoints
- items:
- - key: data
- path: mon-endpoints
- - name: ceph-config
- emptyDir: {}
- tolerations:
- - key: "node.kubernetes.io/unreachable"
- operator: "Exists"
- effect: "NoExecute"
- tolerationSeconds: 5
\ No newline at end of file
diff --git a/examples/libvirt/config/profile/profile1-export-system.yaml b/examples/libvirt/config/profile/profile1-export-system.yaml
deleted file mode 100644
index 7f9ebe0..0000000
--- a/examples/libvirt/config/profile/profile1-export-system.yaml
+++ /dev/null
@@ -1,703 +0,0 @@
-name: Trailblazer System Profile(Pfsense+Vault) NC_test
-uid: 62273780cd04aed456a9c90b
-description: ""
-type: system
-packs:
- - name: pfsense-gateway
- type: manifest
- layer: addon
- registry_uid: ""
- registry: ""
- manifests:
- - name: pfsense-gateway-config
- content: |
- apiVersion: v1
- data:
- user-data.tmpl: |
- #cloud-config
- write_files:
- - encoding: base64
- content: $${CONFIG_XML}
- owner: root:root
- path: /cf/conf/config.xml
- permissions: '0644'
- config-xml.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- $${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- $${DNS_1}
- $${DNS_2}
-
-
-
-
-
- vtnet1
-
-
-
- 32
- $${IP_ADDR_WAN}
- $${SUBNET_WAN}
- WANGW
-
-
-
- vtnet0
-
-
- $${IP_ADDR_LAN}
- $${SUBNET_LAN}
-
-
-
-
-
-
-
- $${DHCP_RANGE_START}
- $${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
- WANGW
-
- wan
- $${IP_GATEWAY_WAN}
- WANGW
-
- inet
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- kind: ConfigMap
- metadata:
- name: pfsense-vm-files
- ---
- apiVersion: v1
- data:
- credentials: VVJJX1RFTVBMQVRFOiAicWVtdStzc2g6Ly8lc0Alcy9zeXN0ZW0/a2V5ZmlsZT0lcyZzb2NrZXQ9JXMma25vd25faG9zdHNfdmVyaWZ5PWlnbm9yZSI=
- kind: Secret
- metadata:
- name: libvirt-account-creds
- type: Opaque
- ---
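- # For reference, the base64 credentials payload above decodes to the libvirt
- # connection URI template consumed by the provider:
- # URI_TEMPLATE: "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"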
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Provider
- metadata:
- name: libvirt
- spec:
- provider: custom
- credentials:
- source: Secret
- secretRef:
- name: libvirt-account-creds
- key: credentials
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Configuration
- metadata:
- name: pfsense-gateway-vm-config
- spec:
- volumeSpec:
- volumeMounts:
- - name: libvirt
- mountPath: /var/run/libvirt/libvirt-sock
- - name: spectro-directory
- mountPath: /opt/spectrocloud
- - mountPath: "/var/files"
- name: files-vol
- volumes:
- - name: files-vol
- configMap:
- name: pfsense-vm-files
- - name: libvirt
- hostPath:
- path: /var/run/libvirt/libvirt-sock
- type: Socket
- - name: spectro-directory
- hostPath:
- path: /opt/spectrocloud
- type: Directory
- deleteResource: true
- variable:
- VM_NAME: "pfsense-gateway-vm"
- NTP: FROM_SECRET_REF
- DNS_1: FROM_SECRET_REF
- DNS_2: FROM_SECRET_REF
- IP_ADDR_WAN: FROM_SECRET_REF
- IP_GATEWAY_WAN: FROM_SECRET_REF
- SUBNET_WAN: FROM_SECRET_REF
- IP_ADDR_LAN: FROM_SECRET_REF
- SUBNET_LAN: FROM_SECRET_REF
- DHCP_RANGE_START: FROM_SECRET_REF
- DHCP_RANGE_END: FROM_SECRET_REF
- providerRef:
- name: libvirt
- hcl: |
- terraform {
- required_version = ">= 0.13"
- required_providers {
- libvirt = {
- source = "dmacvicar/libvirt"
- version = "0.6.14"
- }
- }
- }
-
- ##### VARIABLES #####
- variable "URI_TEMPLATE" {
- type = string
- default = "qemu:///system"
- }
-
- variable "NTP" {
- type = string
- }
-
- variable "DNS_1" {
- type = string
- }
-
- variable "DNS_2" {
- type = string
- }
-
- variable "IP_ADDR_WAN" {
- type = string
- }
-
- variable "SUBNET_WAN" {
- type = string
- }
-
- variable "IP_GATEWAY_WAN" {
- type = string
- }
-
- variable "IP_ADDR_LAN" {
- type = string
- }
-
- variable "SUBNET_LAN" {
- type = string
- }
-
- variable "DHCP_RANGE_START" {
- type = string
- }
-
- variable "DHCP_RANGE_END" {
- type = string
- }
-
- variable "LIBVIRT_SOCKET" {
- type = string
- default = "/var/run/libvirt/libvirt-sock"
- }
-
- variable "VM_NAME" {
- type = string
- default = "pfsense-terraform"
- }
-
- ##### OUTPUT #####
- output "config_value" {
- value = data.template_file.config.rendered
- }
-
- output "user_data_value" {
- value = data.template_file.user_data.rendered
- }
-
- ##### PROVIDER #####
- provider "libvirt" {
- uri = "qemu:///system"
- }
-
- data "template_file" "config" {
- template = file("/var/files/config-xml.tmpl")
- vars = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- SUBNET_WAN = var.SUBNET_WAN
- IP_GATEWAY_WAN = var.IP_GATEWAY_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
- }
-
- data "template_file" "user_data" {
- template = file("/var/files/user-data.tmpl")
- vars = {
- CONFIG_XML = base64encode(data.template_file.config.rendered)
- }
- }
-
- resource "libvirt_pool" "ubuntu" {
- name = "tf-pool"
- type = "dir"
- path = "/var/lib/libvirt/tf-pool"
- }
-
- resource "libvirt_volume" "ubuntu-qcow2" {
- name = "pfsense-qcow2"
- pool = libvirt_pool.ubuntu.name
- source = "/opt/spectrocloud/vm-operator/pfsense-cloudinit.qcow2"
- format = "qcow2"
- }
-
- resource "libvirt_cloudinit_disk" "commoninit" {
- name = "pfsense.iso"
- user_data = data.template_file.user_data.rendered
- pool = libvirt_pool.ubuntu.name
- }
-
- resource "libvirt_domain" "domain-ubuntu" {
- name = var.VM_NAME
- memory = "2048"
- vcpu = 1
-
- cloudinit = libvirt_cloudinit_disk.commoninit.id
-
- network_interface {
- bridge = "br0"
- }
-
- network_interface {
- bridge = "br1"
- }
-
- console {
- type = "pty"
- target_port = "0"
- target_type = "serial"
- }
-
- disk {
- volume_id = libvirt_volume.ubuntu-qcow2.id
- }
-
- graphics {
- type = "vnc"
- listen_type = "address"
- listen_address = "0.0.0.0"
- autoport = true
- }
- }
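- # For illustration, once the two templates are rendered, the cloud-init
- # user-data handed to the VM reduces to this sketch (base64 payload elided):
- # #cloud-config
- # write_files:
- #   - encoding: base64
- #     content: <base64 of the rendered config.xml>
- #     owner: root:root
- #     path: /cf/conf/config.xml
- #     permissions: '0644'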
- version: 1.0.0
- values: |-
- pack:
- spectrocloud.com/manifest-type: "vm"
- spectrocloud.com/install-priority: "0"
- - name: ehl-vault-webservice
- type: helm
- layer: addon
- registry_uid: 61e915a6b7e45d9d6e6a2084
- registry: helm-blr-ees
- manifests:
- - name: ehl-vault-webservice
- content: |-
- pack:
- namespace: "default"
- spectrocloud.com/install-priority: "800"
- releaseNameOverride:
- ehl-vault-webservice: ehl-vault-webservice
- version: 1.0.0
- tag: 1.0.0
- values: |-
- pack:
- namespace: "default"
- spectrocloud.com/install-priority: "800"
- releaseNameOverride:
- ehl-vault-webservice: ehl-vault-webservice
diff --git a/examples/libvirt/config/profile/profile1-export.yaml b/examples/libvirt/config/profile/profile1-export.yaml
deleted file mode 100644
index 6f472ef..0000000
--- a/examples/libvirt/config/profile/profile1-export.yaml
+++ /dev/null
@@ -1,227 +0,0 @@
-name: ehl-dicom-NC_test
-uid: 620a7eb211667f4c46f57fdf
-description: dicom profile
-type: add-on
-packs:
- - name: eis-series-install-notifier
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.0.0-58eadf5
- tag: 1.0.0-58eadf5
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-series-install-notifier: eis-series-install-notifier
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-series-install-notifier
- version: 1.0.0-58eadf5
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_series_install_notifier/eis_series_install_notifier:1.0.0-58eadf5
- - name: eis-dcm-deletion-service
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.0.1-463f4d9
- tag: 1.0.1-463f4d9
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-deletion-service: eis-dcm-deletion-service
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-deletion-service
- version: 1.0.1-463f4d9
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_dicom_service_deletion/eis_dicom_service_deletion:1.0.1-463f4d9
- - name: eis-dcm-auto-delete-service
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.0.0-48aaaff
- tag: 1.0.0-48aaaff
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-auto-delete-service: eis-dcm-auto-delete-service
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-auto-delete-service
- version: 1.0.0-48aaaff
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_dcm_auto_delete_service/eis_dcm_auto_delete_service:1.0.0-48aaaff
- - name: eis-dcm-services-stow
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.1.0-e7042f9
- tag: 1.1.0-e7042f9
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-services-stow: eis-dcm-services-stow
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-services-stow
- version: 1.1.0-e7042f9
- images:
- - blr-artifactory.cloud/docker-eis-all/eis-dicom-services-stow/eis-dicom-services-stow:1.1.0-e7042f9
- - name: eis-dcm-services-query
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.1.0-20a7a47
- tag: 1.1.0-20a7a47
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-services-query: eis-dcm-services-query
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-services-query
- version: 1.1.0-20a7a47
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_dicom_services_query/eis_dicom_services_query:1.1.0-20a7a47
- - name: eis-dcm-services-wado
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 20.0.1-6b17e72
- tag: 20.0.1-6b17e72
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-services-wado: eis-dcm-services-wado
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-services-wado
- version: 20.0.1-6b17e72
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_dicom_services_wado/eis_dicom_services_wado:20.0.1-6b17e72
- - name: ehl-redis
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 0.0.1-da507a4
- tag: 0.0.1-da507a4
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- ehl-redis: ehl-redis
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: ehl-redis
- version: 0.0.1-da507a4
- images:
- - blr-artifactory.cloud/docker-eis-all/redis:5.0.7
- - blr-artifactory.cloud/docker-eis-all/ehl-redis/redis-sentinel:5.0.7-debian-10-r17
- - name: eis-dcm-uid-service
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 0.0.3-fcc3cd8
- tag: 0.0.3-fcc3cd8
- values: |-
- pack:
- namespace: "core"
- releaseNameOverride:
- eis-dcm-uid-service: eis-dcm-uid-service
- spectrocloud.com/install-priority: "700"
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-uid-service
- version: 0.0.3-fcc3cd8
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_dcm_uid_service/eis_dcm_uid_service:0.0.3-fcc3cd8
- - name: eis-dcm-remote-query
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.0.0-acd567a
- tag: 1.0.0-acd567a
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-remote-query: eis-dcm-remote-query
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-remote-query
- version: 1.0.0-acd567a
- images:
- - blr-artifactory.cloud/docker-eis-all/eis-dcm-remote-query/eis-dcm-remote-query:1.0.0-acd567a
- - name: transformer
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.0.0-e593de3
- tag: 1.0.0-e593de3
- values: |-
- pack:
- namespace: "core"
- releaseNameOverride:
- transformer: transformer
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: transformer
- version: 1.0.0-e593de3
- images:
- - blr-artifactory.cloud/docker-eis-all/transformer/transformer:1.0.0-e593de3
- spectrocloud.com/install-priority: "700"
- - name: eis-dcm-scp
- type: helm
- layer: addon
- registry_uid: 622a58174a85b30ed2a9875a
- registry: helm-blr-ees
- version: 1.1.0-bb42bbb
- tag: 1.1.0-bb42bbb
- values: |-
- pack:
- namespace: "core"
- spectrocloud.com/install-priority: "700"
- releaseNameOverride:
- eis-dcm-scp: eis-dcm-scp
- content:
- charts:
- - repo: https://blr-artifactory.cloud/artifactory/helm-ees-all/
- name: eis-dcm-scp
- version: 1.1.0-bb42bbb
- images:
- - blr-artifactory.cloud/docker-eis-all/eis_dicom_services_scp/eis_dicom_services_scp:1.1.0-bb42bbb
- - blr-artifactory.cloud/docker-eis-all/trust-store-watcher/key_trust_watcher:1.0.0-928b21b
diff --git a/examples/libvirt/config/profile/profile1-infra.yaml b/examples/libvirt/config/profile/profile1-infra.yaml
deleted file mode 100644
index e24c194..0000000
--- a/examples/libvirt/config/profile/profile1-infra.yaml
+++ /dev/null
@@ -1,303 +0,0 @@
-name: profile_infra
-description: profile_infra
-type: cluster
-defaultRepoURL: "${REPO_URL}"
-packs:
- - name: "amazon-linux-eks"
- type: "spectro"
- layer: "os"
- server: "${SPECTRO_REPO_URL}"
- version: "1.0.0"
- values: "# AMI will be selected automatically based on the kubernetes version, region chosen"
- - name: "kubernetes-eks"
- type: "spectro"
- layer: "k8s"
- server: "${SPECTRO_REPO_URL}"
- version: "1.20"
- values: |
- ## EKS settings
- managedControlPlane:
-
- ## Controlplane Logging
- logging:
-
- # Setting to toggle Kubernetes API Server logging (kube-apiserver)
- apiServer: false
-
- # Setting to toggle the Kubernetes API audit logging
- audit: false
-
- # Setting to toggle the cluster authentication logging
- authenticator: false
-
- # Setting to toggle the controller manager (kube-controller-manager) logging
- controllerManager: false
-
- # Setting to toggle the Kubernetes scheduler (kube-scheduler) logging
- scheduler: false
-
- # OIDC related config
- oidcIdentityProvider:
- identityProviderConfigName: '%OIDC_IDENTITY_PROVIDER_NAME%' # The name of the OIDC provider configuration
- issuerUrl: '%OIDC_ISSUER_URL%' # The URL of the OpenID identity provider
- clientId: '%OIDC_CLAIM_NAME%' # The ID for the client application that makes authentication requests to the OpenID identity provider
- usernameClaim: "email" # The JSON Web Token (JWT) claim to use as the username
- usernamePrefix: "-" # The prefix that is prepended to username claims to prevent clashes with existing names
- groupsClaim: "groups" # The JWT claim that the provider uses to return your groups
- groupsPrefix: "-" # The prefix that is prepended to group claims to prevent clashes with existing names
- requiredClaims: # The key value pairs that describe required claims in the identity token
-
- ## A list of additional policies to attach to the control plane role
- #roleAdditionalPolicies:
- #- {{ arn for the policy1 | format "${string}" }}
- #- {{ arn for the policy2 | format "${string}" }}
-
- ## Custom EKS node roleName for all the node groups
- ## This self-managed IAM role(https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html) should include the required
- ## policies - 'AmazonEKSWorkerNodePolicy','AmazonEC2ContainerRegistryReadOnly', 'AmazonSSMManagedInstanceCore' and 'AmazonEKS_CNI_Policy'
- ## NOTE: The roleName cannot have the prefix 'ng-role_' since this is reserved for spectro-generated roles
- managedMachinePool:
- roleName: "%MACHINE_POOL_ROLE_NAME%"
-
- ## A list of additional policies to attach to the node group role
- #roleAdditionalPolicies:
- #- {{ arn for the policy1 | format "${string}" }}
- #- {{ arn for the policy2 | format "${string}" }}
-
- ## Client configuration to add OIDC based authentication flags in kubeconfig
- clientConfig:
- oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}"
- oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}"
- oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- oidc-extra-scope: profile,email
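- # For context, these clientConfig flags map onto the legacy kubectl OIDC
- # auth-provider stanza in a kubeconfig, roughly as in this sketch (the user
- # name is arbitrary and the issuer/client values are placeholders):
- # users:
- #   - name: oidc-user
- #     user:
- #       auth-provider:
- #         name: oidc
- #         config:
- #           idp-issuer-url: <oidc-issuer-url>
- #           client-id: <oidc-client-id>
- #           client-secret: <oidc-client-secret>
- #           extra-scopes: profile,email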
- - name: "cni-aws-vpc-eks"
- layer: "cni"
- server: "${SPECTRO_REPO_URL}"
- type: "spectro"
- values: "# AWS VPC CNI will be auto-installed"
- version: "1.0"
-
- - name: "csi-aws"
- layer: "csi"
- server: "${SPECTRO_REPO_URL}"
- type: "spectro"
- version: "1.0.0"
- values: |
- manifests:
- aws_ebs:
-
- #Storage type should be one of io1, gp2, sc1, st1 types
- #Check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html for more details
- storageType: \"gp2\"
-
- #Allowed reclaim policies are Delete, Retain
- reclaimPolicy: \"Delete\"
-
- #Toggle for Volume expansion
- allowVolumeExpansion: \"true\"
-
- #Toggle for Default class
- isDefaultClass: \"true\"
-
- #Supported binding modes are Immediate, WaitForFirstConsumer
- #Setting this to WaitForFirstConsumer for AWS, so that the volumes gets created in the same AZ as that of the pods
- volumeBindingMode: \"WaitForFirstConsumer\"
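- # The values above translate, roughly, into an EBS StorageClass like this
- # sketch (the class name is an assumption; the parameters mirror the pack):
- # apiVersion: storage.k8s.io/v1
- # kind: StorageClass
- # metadata:
- #   name: spectro-storage-class
- #   annotations:
- #     storageclass.kubernetes.io/is-default-class: "true"
- # provisioner: kubernetes.io/aws-ebs
- # parameters:
- #   type: gp2
- # reclaimPolicy: Delete
- # allowVolumeExpansion: true
- # volumeBindingMode: WaitForFirstConsumer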
- - name: "aws-ssm-agent"
- layer: "addon"
- server: "${SPECTRO_REPO_URL}"
- type: "spectro"
- version: "1.0.0"
- values: |
- pack:
- spectrocloud.com/install-priority: "0"
- manifests:
- # Amazon Linux 2 doesn't install the SSM agent by default; this will be fixed in future AMIs
- # https://github.com/aws/containers-roadmap/issues/593#issuecomment-823768607
- # Meanwhile, this DaemonSet installs the AWS SSM agent on all nodes
- aws-ssm-agent:
- contents: >
- apiVersion: apps/v1
-
- kind: DaemonSet
-
- metadata:
- name: eks-host-config
- namespace: kube-system
- spec:
- selector:
- matchLabels:
- app: eks-host-config
- template:
- metadata:
- name: eks-host-config
- labels:
- app: eks-host-config
- spec:
- initContainers:
- - name: ssm-install-unit
- image: debian:buster
- command:
- - sh
- - -c
- - |
- set -x
-
- # Add unit file to install the SSM agent
- cat >/etc/systemd/system/install-ssm.service <<EOF
- preKubeadmCommands:
- - 'echo " ====> Applying kernel parameters for Kubelet"'
- - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
- postKubeadmCommands:
- # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up
- # Sometimes api server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails
- - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"'
-
- # Client configuration to add OIDC based authentication flags in kubeconfig
- #clientConfig:
- #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
- #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
- #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- #oidc-extra-scope: profile,email
- - name: cni-calico
- type: spectro
- layer: cni
- registry: Public Repo
- version: 3.19.0
- tag: 3.19.0
- values: |-
- pack:
- content:
- images:
- - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0
- - image: gcr.io/spectro-images-public/calico/node:v3.19.0
- - image: gcr.io/spectro-images-public/calico/cni:v3.19.0
- - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0
-
- manifests:
- calico:
-
- # IPAM type to use. Supported types are calico-ipam, host-local
- ipamType: "calico-ipam"
-
- # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN
- encapsulationType: "CALICO_IPV4POOL_IPIP"
-
- # Should be one of Always, CrossSubnet, Never
- encapsulationMode: "Always"
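- # In the rendered calico-node DaemonSet, the two settings above surface as
- # the standard pool environment variable, e.g. (a sketch, assuming the
- # upstream Calico env names):
- # env:
- #   - name: CALICO_IPV4POOL_IPIP
- #     value: "Always"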
- - name: csi-rook-ceph
- type: spectro
- layer: csi
- registry: Public Repo
- version: 1.8.0
- tag: 1.8.0
- values: |-
- pack:
- content:
- images:
- - image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
- - image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
- - image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
- - image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
- - image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
- - image: quay.io/cephcsi/cephcsi:v3.4.0
- - image: quay.io/ceph/ceph:v16.2.7
- - image: docker.io/rook/ceph:v1.8.0
-
- manifests:
- storageclass:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephFilesystem
- metadata:
- name: myfs
- namespace: rook-ceph # namespace:cluster
- spec:
- # The metadata pool spec. Must use replication.
- metadataPool:
- replicated:
- size: 3
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # The list of data pool specs. Can use replication or erasure coding.
- dataPools:
- - name: replicated
- failureDomain: host
- replicated:
- size: 3
- # Disallow setting pool with replica 1, this could lead to data loss without recovery.
- # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # Whether to preserve filesystem after CephFilesystem CRD deletion
- preserveFilesystemOnDelete: true
- # The metadata service (mds) configuration
- metadataServer:
- # The number of active MDS instances
- activeCount: 1
- # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
- # If false, standbys will be available, but will not have a warm cache.
- activeStandby: true
- # The affinity rules to apply to the mds deployment
- placement:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - mds-node
- # topologySpreadConstraints:
- # tolerations:
- # - key: mds-node
- # operator: Exists
- # podAffinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: kubernetes.io/hostname will place MDS across different hosts
- topologyKey: kubernetes.io/hostname
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: */zone can be used to spread MDS across different AZ
- # Use <topologyKey: failure-domain.beta.kubernetes.io/zone> in k8s cluster if your cluster is v1.16 or lower
- # Use <topologyKey: topology.kubernetes.io/zone> in k8s cluster if it is v1.17 or newer
- topologyKey: topology.kubernetes.io/zone
- # A key/value list of annotations
- annotations:
- # key: value
- # A key/value list of labels
- labels:
- # key: value
- resources:
- # The requests and limits set here, allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # priorityClassName: my-priority-class
- # Filesystem mirroring settings
- # mirroring:
- # enabled: true
- # list of Kubernetes Secrets containing the peer token
- # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
- # peers:
- #secretNames:
- #- secondary-cluster-peer
- # specify the schedule(s) on which snapshots should be taken
- # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
- # snapshotSchedules:
- # - path: /
- # interval: 24h # daily snapshots
- # startTime: 11:55
- # manage retention policies
- # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
- # snapshotRetention:
- # - path: /
- # duration: "h 24"
- ---
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: standard
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
- # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
- provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
- parameters:
- # clusterID is the namespace where the rook cluster is running
- # If you change this namespace, also change the namespace below where the secret namespaces are defined
- clusterID: rook-ceph # namespace:cluster
-
- # CephFS filesystem name into which the volume shall be created
- fsName: myfs
-
- # Ceph pool into which the volume shall be created
- # Required for provisionVolume: "true"
- pool: myfs-data0
-
- # The secrets contain Ceph admin credentials. These are generated automatically by the operator
- # in the same namespace as the cluster.
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
- csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
-
- # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
- # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
- # or by setting the default mounter explicitly via --volumemounter command-line argument.
- # mounter: kernel
- reclaimPolicy: Delete
- allowVolumeExpansion: true
- #Supported binding modes are Immediate, WaitForFirstConsumer
- volumeBindingMode: "WaitForFirstConsumer"
- mountOptions:
- # uncomment the following line for debugging
- #- debug
-
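- # A claim against the "standard" class above would look like this sketch
- # (the claim name and size are illustrative; CephFS supports ReadWriteMany):
- # apiVersion: v1
- # kind: PersistentVolumeClaim
- # metadata:
- #   name: cephfs-pvc
- # spec:
- #   accessModes:
- #     - ReadWriteMany
- #   resources:
- #     requests:
- #       storage: 1Gi
- #   storageClassName: standard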
- cluster:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephCluster
- metadata:
- name: rook-ceph
- namespace: rook-ceph # namespace:cluster
- spec:
- cephVersion:
- # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v15 is octopus, and v16 is pacific.
- # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
- # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
- # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v16.2.7-20211208
- # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
- image: quay.io/ceph/ceph:v16.2.7
- # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
- # Future versions such as `quincy` would require this to be set to `true`.
- # Do not set to true in production.
- allowUnsupported: false
- # The path on the host where configuration files will be persisted. Must be specified.
- # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
- # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
- dataDirHostPath: /var/lib/rook
- # Whether or not upgrade should continue even if a check fails
- # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise
- # Use at your OWN risk
- # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
- skipUpgradeChecks: false
- # Whether or not to continue if PGs are not clean during an upgrade
- continueUpgradeAfterChecksEvenIfNotHealthy: false
- # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator will wait before an OSD can be stopped for upgrade or restart.
- # If the timeout is exceeded and the OSD is not ok to stop, the operator will skip the upgrade for the current OSD and proceed with the next one
- # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, the operator will
- # continue with the upgrade of an OSD even if it is not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
- # The default wait timeout is 10 minutes.
- waitTimeoutForHealthyOSDInMinutes: 10
- mon:
- # Set the number of mons to be started. Generally recommended to be 3.
- # For highest availability, an odd number of mons should be specified.
- count: 3
- # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
- # Mons should only be allowed on the same node for test environments where data loss is acceptable.
- allowMultiplePerNode: false
- mgr:
- # When higher availability of the mgr is needed, increase the count to 2.
- # In that case, one mgr will be active and one in standby. When Ceph updates which
- # mgr is active, Rook will update the mgr services to match the active mgr.
- count: 1
- modules:
- # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
- # are already enabled by other settings in the cluster CR.
- - name: pg_autoscaler
- enabled: true
- # enable the ceph dashboard for viewing cluster status
- dashboard:
- enabled: true
- # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
- # urlPrefix: /ceph-dashboard
- # serve the dashboard at the given port.
- # port: 8443
- # serve the dashboard using SSL
- ssl: true
- # enable prometheus alerting for cluster
- monitoring:
- # requires Prometheus to be pre-installed
- enabled: false
- # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
- # Recommended:
- # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
- # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
- # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
- rulesNamespace: rook-ceph
- network:
- # enable host networking
- #provider: host
- # enable the Multus network provider
- #provider: multus
- #selectors:
- # The selector keys are required to be `public` and `cluster`.
- # Based on the configuration, the operator will do the following:
- # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
- # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
- #
- # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
- #
- #public: public-conf --> NetworkAttachmentDefinition object name in Multus
- #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
- # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
- #ipFamily: "IPv6"
- # Ceph daemons to listen on both IPv4 and IPv6 networks
- #dualStack: false
- # enable the crash collector for ceph daemon crash collection
- crashCollector:
- disable: false
- # Uncomment daysToRetain to prune ceph crash entries older than the
- # specified number of days.
- #daysToRetain: 30
- # enable log collector, daemons will log on files and rotate
- # logCollector:
- # enabled: true
- # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
- # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
- cleanupPolicy:
- # Since cluster cleanup is destructive to data, confirmation is required.
- # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
- # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
- # Rook will immediately stop configuring the cluster and only wait for the delete command.
- # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
- confirmation: ""
- # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
- sanitizeDisks:
- # method indicates if the entire disk should be sanitized or simply ceph's metadata
- # in both cases, re-installation is possible
- # possible choices are 'complete' or 'quick' (default)
- method: quick
- # dataSource indicates where to get the random bytes to write on the disk
- # possible choices are 'zero' (default) or 'random'
- # using random sources will consume entropy from the system and will take much more time than the zero source
- dataSource: zero
- # iteration: overwrite N times instead of the default (1)
- # takes an integer value
- iteration: 1
- # allowUninstallWithVolumes defines how the uninstall should be performed
- # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
- allowUninstallWithVolumes: false
- # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
- # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
- # tolerate taints with a key of 'storage-node'.
- # placement:
- # all:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - storage-node
- # podAffinity:
- # podAntiAffinity:
- # topologySpreadConstraints:
- # tolerations:
- # - key: storage-node
- # operator: Exists
- # The above placement information can also be specified for mon, osd, and mgr components
- # mon:
- # Monitor deployments may contain an anti-affinity rule for avoiding monitor
- # collocation on the same node. This is a required rule when host network is used
- # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
- # preferred rule with weight: 50.
- # osd:
- # mgr:
- # cleanup:
- annotations:
- # all:
- # mon:
- # osd:
- # cleanup:
- # prepareosd:
- # If no mgr annotations are set, prometheus scrape annotations will be set by default.
- # mgr:
- labels:
- # all:
- # mon:
- # osd:
- # cleanup:
- # mgr:
- # prepareosd:
- # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by the operator.
- # These labels can be passed as a LabelSelector to Prometheus
- # monitoring:
- # crashcollector:
- resources:
- # The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
- # mgr:
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # The above example requests/limits can also be added to the other components
- # mon:
- # osd:
- # For OSDs it is also possible to specify requests/limits based on device class
- # osd-hdd:
- # osd-ssd:
- # osd-nvme:
- # prepareosd:
- # mgr-sidecar:
- # crashcollector:
- # logcollector:
- # cleanup:
- # The option to automatically remove OSDs that are out and are safe to destroy.
- removeOSDsIfOutAndSafeToRemove: true
- # priorityClassNames:
- # all: rook-ceph-default-priority-class
- # mon: rook-ceph-mon-priority-class
- # osd: rook-ceph-osd-priority-class
- # mgr: rook-ceph-mgr-priority-class
- storage: # cluster level storage configuration and selection
- useAllNodes: true
- useAllDevices: true
- #deviceFilter:
- config:
- # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
- # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
- # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
- # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
- # osdsPerDevice: "1" # this value can be overridden at the node or device level
- # encryptedDevice: "true" # the default value for this option is "false"
- # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
- # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
- # nodes:
- # - name: "172.17.4.201"
- # devices: # specific devices to use for storage can be specified for each node
- # - name: "sdb"
- # - name: "nvme01" # multiple osds can be created on high performance devices
- # config:
- # osdsPerDevice: "5"
- # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
- # config: # configuration can be specified at the node level which overrides the cluster level config
- # - name: "172.17.4.301"
- # deviceFilter: "^sd."
- # when onlyApplyOSDPlacement is false, the operator will merge both placement.All() and placement.osd
- onlyApplyOSDPlacement: false
- # The section for configuring management of daemon disruptions during upgrade or fencing.
- disruptionManagement:
- # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
- # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
- # block eviction of OSDs by default and unblock them safely when drains are detected.
- managePodBudgets: true
- # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
- # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
- osdMaintenanceTimeout: 30
- # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
- # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
- # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
- pgHealthCheckTimeout: 0
- # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
- # Only available on OpenShift.
- manageMachineDisruptionBudgets: false
- # Namespace in which to watch for the MachineDisruptionBudgets.
- machineDisruptionBudgetNamespace: openshift-machine-api
-
- # healthChecks
- # Valid values for daemons are 'mon', 'osd', 'status'
- healthCheck:
- daemonHealth:
- mon:
- disabled: false
- interval: 45s
- osd:
- disabled: false
- interval: 60s
- status:
- disabled: false
- interval: 60s
- # Change the pod liveness probe; it works for all mon, mgr, and osd daemons
- livenessProbe:
- mon:
- disabled: false
- mgr:
- disabled: false
- osd:
- disabled: false
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x b/examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x
deleted file mode 100644
index 6111a62..0000000
--- a/examples/local-simplified-yaml/admin/config/profile/profile-ehl-apps.yaml.x
+++ /dev/null
@@ -1,277 +0,0 @@
-name: ehl-apps
-description: ""
-type: add-on
-packs:
-- name: ehl-generic
- type: manifest
- layer: addon
- registry: helm-blr-ees
- manifests:
- - name: namespaces
- content: |-
- ---
- ###########################
- # NAME SPACES CREATION
- ############################
- apiVersion: v1
- kind: Namespace
- metadata:
- name: edison-system
- ---
- apiVersion: v1
- kind: Namespace
- metadata:
- name: ehl-control-plane
- ---
- apiVersion: v1
- kind: Namespace
- metadata:
- name: edison-core
- ---
- apiVersion: v1
- kind: Namespace
- metadata:
- name: edison-policy
- ---
- apiVersion: v1
- kind: Namespace
- metadata:
- name: kubeaddons
- ---
- apiVersion: v1
- kind: Namespace
- metadata:
- name: edison-priority-scheduler
- ---
- - name: configmap
- content: |-
- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: ehl-generic-map
- namespace: ehl-control-plane
- data:
- bootstrap_host: 192.168.100.10
- bootstrap_pswd: Minda00$
- bootstrap_user: root
- ehl_version: EHL-2.0-SC-dev
- version: 1.0.0
- values: |-
- pack:
- spectrocloud.com/install-priority: "-100"
-- name: ehl-monitoring
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 2.0.1-af15864
- tag: 2.0.1-af15864
- values: |-
- pack:
- namespace: "edison-system"
- spectrocloud.com/install-priority: "110"
- releaseNameOverride:
- ehl-monitoring: ehl-monitoring
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: ehl-monitoring
- version: 2.0.1-af15864
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/prometheus:v2.22.1
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/prometheus-operator:v0.44.0
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/kube-webhook-certgen:v1.5.2
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/configmap-reload:v0.4.0
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/prometheus-config-reloader:v0.44.0
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/alertmanager:v0.21.0
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/kube-state-metrics:v1.9.7
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/node-exporter:v1.0.1
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-monitoring/chronyntpexporter:2.0.1-af15864
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-monitoring/validationwebhook:2.0.1-af15864
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-monitoring/ext_svc_config:2.0.1-af15864
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/prom-opr/blackbox-exporter:v0.18.0
-
- external-svc-config:
- enabled: false
-- name: ehl-logging
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 0.0.1-9478efa
- tag: 0.0.1-9478efa
- values: |-
- pack:
- namespace: "edison-system"
- spectrocloud.com/install-priority: "120"
- releaseNameOverride:
- ehl-logging: ehl-logging
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: ehl-logging
- version: 0.0.1-9478efa
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/fluentd:1.12.4-debian-10-r3
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-logging/ehl-alpine-nginx:0.0.1-9478efa
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/fluent-bit-plugin-loki:2.0.0-amd64
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/loki:2.0.0
-- name: ehl-monitoring-security
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 0.0.1-30c63f5
- tag: 0.0.1-30c63f5
- values: |-
- pack:
- namespace: "edison-system"
- spectrocloud.com/install-priority: "130"
- releaseNameOverride:
- ehl-monitoring-security: ehl-monitoring-security
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: ehl-monitoring-security
- version: 0.0.1-30c63f5
-- name: ehl-grafana
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 0.0.1-4260767
- tag: 0.0.1-4260767
- values: |-
- pack:
- namespace: "edison-system"
- spectrocloud.com/install-priority: "140"
- releaseNameOverride:
- ehl-grafana: ehl-grafana
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: ehl-grafana
- version: 0.0.1-4260767
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/grafana:8.3.4
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/k8s-sidecar:1.15.4
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/logging-stack/busybox:1.35.0
-- name: sprsnapshot-service
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 1.0.0-a7da622
- tag: 1.0.0-a7da622
- values: |-
- pack:
- namespace: "edison-system"
- releaseNameOverride:
- sprsnapshot-service: ehl-sprsnap-service
- spectrocloud.com/install-priority: "150"
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: sprsnapshot-service
- version: 1.0.0-a7da622
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-sprsnap-service/ehl-sprsnap-service:1.0.0-a7da622
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-sprsnap-service/ehl-alpine-nginx-sprsnap:1.0.0-a7da622
-- name: ehl-metacontroller
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 0.0.1-aa67f86
- tag: 0.0.1-aa67f86
- values: |-
- pack:
- namespace: "edison-system"
- releaseNameOverride:
- ehl-metacontroller: ehl-metacontroller
- spectrocloud.com/install-priority: "170"
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: ehl-metacontroller
- version: 0.0.1-aa67f86
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/metacontrollerio/metacontroller:v1.0.3
-- name: eis-postgres
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 2.0.0-d2433f4
- tag: 2.0.0-d2433f4
- values: |-
- pack:
- namespace: "edison-system"
- releaseNameOverride:
- eis-postgres: eis-common-postgres
- spectrocloud.com/install-priority: "180"
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/
- name: eis-postgres
- version: 2.0.0-d2433f4
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/postgres-operator:v1.6.0
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/spilo-12:1.6-p5
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/precheck:2.0.0-d2433f4
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/post_delete_hook:2.0.0-d2433f4
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/wrouesnel/postgres_exporter:v0.8.0
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/postgres_import_export:2.0.0-d2433f4
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/webhook:2.0.0-d2433f4
-
- postgres-operator:
- enabled: true
- eisDicomRsDb:
- enabled: false
- postgres-import-export:
- enabled: true
- postgres-monitoring:
- enabled: true
- eespostgresaccount:
- enabled: true
-- name: edison-priority-scheduler
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 1.0.0-654a62d
- tag: 1.0.0-654a62d
- values: |-
- pack:
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all
- name: edison-priority-scheduler
- version: 1.0.0-654a62d
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/edison-priority-scheduler/edison-priority-scheduler:1.0.0-654a62d
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-redis/redis:6.2.5
- - image: hc-eu-west-aws-artifactory.cloud.health.ge.com/docker-eis-dev/eps-test-client:latest
- namespace: "edison-priority-scheduler"
- releaseNameOverride:
- edison-priority-scheduler: edison-priority-scheduler
- spectrocloud.com/install-priority: "200"
-
- global:
- resource_config:
- manual: true
- prometheusRules:
- enabled: false
-- name: eis-dicom-postgres--eis-postgres
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 2.0.0-d2433f4
- tag: 2.0.0-d2433f4
- values: "pack:\n namespace: \"edison-system\"\n releaseNameOverride:\n eis-postgres:
- eis-dicom-postgres\n spectrocloud.com/install-priority: \"700\"\n spectrocloud.com/display-name:
- \"eis-dicom-postgres\"\n content:\n charts:\n - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all/\n
- \ name: eis-postgres\n version: 2.0.0-d2433f4\n images:\n -
- image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/postgres-operator:v1.6.0\n
- \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/postgres-operator/acid/spilo-12:1.6-p5\n
- \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/precheck:2.0.0-d2433f4\n
- \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/post_delete_hook:2.0.0-d2433f4\n
- \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/wrouesnel/postgres_exporter:v0.8.0\n
- \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/postgres_import_export:2.0.0-d2433f4\n
- \ - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/eis-postgres/webhook:2.0.0-d2433f4
- \n \n postgres-operator:\n enabled: false\n eisDicomRsDb:\n enabled:
- true\n postgres-import-export:\n enabled: false\n postgres-monitoring:\n
- \ enabled: false\n eespostgresaccount:\n enabled: false"
-cloudType: all
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x b/examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x
deleted file mode 100644
index 27ae982..0000000
--- a/examples/local-simplified-yaml/admin/config/profile/profile-ehl-core.yaml.x
+++ /dev/null
@@ -1,55 +0,0 @@
-name: ehl-core
-description: ""
-type: add-on
-packs:
-- name: bootstrap
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 1.0.0-7ff1498
- tag: 1.0.0-7ff1498
- values: |-
- pack:
- namespace: "ehl-control-plane"
- spectrocloud.com/install-priority: "10"
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all
- name: bootstrap
- version: 1.0.0-7ff1498
-- name: clustermgr-service
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 1.0.0-f4a4859
- tag: 1.0.0-f4a4859
- values: |-
- pack:
- namespace: "ehl-control-plane"
- spectrocloud.com/install-priority: "20"
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all
- name: clustermgr-service
- version: 1.0.0-f4a4859
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/cluster-mgr/nginx_sidecar:1.0.0-f4a4859
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/cluster-mgr/cluster-mgr:1.0.0-f4a4859
-- name: host-service
- type: helm
- layer: addon
- registry: helm-blr-ees
- version: 1.0.0-bcd39e0
- tag: 1.0.0-bcd39e0
- values: |-
- pack:
- namespace: "ehl-control-plane"
- spectrocloud.com/install-priority: "30"
- content:
- charts:
- - repo: https://blr-artifactory.cloud.health.ge.com/artifactory/helm-ees-all
- name: host-service
- version: 1.0.0-bcd39e0
- images:
- - image: blr-artifactory.cloud.health.ge.com/docker-eis-all/ehl-host-service/ehl-host-service:1.0.0-bcd39e0
-cloudType: all
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml
deleted file mode 100644
index d3bb314..0000000
--- a/examples/local-simplified-yaml/admin/config/profile/profile-spectro-core.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: spectro-core
-description: spectro-core
-type: add-on
-cloudType: all
-packs:
- - name: "spectro-proxy"
- registry: Public Repo
- type: "spectro"
- layer: "addon"
- version: "1.0.0"
- values: |
- pack:
- spectrocloud.com/install-priority: "-200"
- content:
- images:
- - image: gcr.io/spectro-images-public/release/frpc:v1.0.0
- - image: gcr.io/spectro-images-public/release/frpc-init:v1.0.0
- artifacts:
- - source: https://rishi-public-bucket.s3.us-west-2.amazonaws.com/content/web/spectro.png
-
-
- manifests:
- spectro-proxy:
- namespace: "cluster-{{ .spectro.system.cluster.uid }}"
- server: "{{ .spectro.system.reverseproxy.server }}"
- clusterUid: "{{ .spectro.system.cluster.uid }}"
- subdomain: "cluster-{{ .spectro.system.cluster.uid }}"
-
- - name: "lb-metallb"
- registry: Public Repo
- type: "spectro"
- layer: "addon"
- version: "0.11.0"
- values: |
- pack:
- spectrocloud.com/install-priority: "0"
- content:
- images:
- - image: quay.io/metallb/controller:v0.11.0
- - image: quay.io/metallb/speaker:v0.11.0
- manifests:
- metallb:
- #The namespace to use for deploying MetalLB
- namespace: "metallb-system"
- #MetalLB will skip assigning the .0 and .255 IP addresses when this flag is enabled
- avoidBuggyIps: true
- # Layer 2 config; the IP address range MetalLB should use when assigning IPs for services of type LoadBalancer
- # For the supported formats, check https://metallb.universe.tf/configuration/#layer-2-configuration
- addresses:
- - 192.168.100.245-192.168.100.254
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml
deleted file mode 100644
index 0281bcc..0000000
--- a/examples/local-simplified-yaml/admin/config/profile/profile-vsphere-with-creds-infra.yaml
+++ /dev/null
@@ -1,206 +0,0 @@
-name: vsphere-with-creds-infra
-description: ""
-type: cluster
-cloudType: vsphere
-packs:
- - name: sles-vsphere
- type: spectro
- layer: os
- registry: Public Repo
- version: 15.3
- tag: 15.3
- values: |-
- kubeadmconfig:
- preKubeadmCommands:
- - echo "Executing pre kube admin config commands"
- - update-ca-certificates
- - 'systemctl restart containerd; sleep 3'
- - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done'
- postKubeadmCommands:
- - echo "Executing post kube admin config commands"
- files:
- - targetPath: /usr/share/pki/trust/anchors/ca.crt
- targetOwner: "root:root"
- targetPermissions: "0644"
- content: |
- -----BEGIN CERTIFICATE-----
- MIIDozCCAougAwIBAgIQeO8XlqAMLhxvtCap35yktzANBgkqhkiG9w0BAQsFADBS
- MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYR2VuZXJhbCBFbGVjdHJpYyBDb21wYW55
- MSAwHgYDVQQDExdHRSBFeHRlcm5hbCBSb290IENBIDIuMTAeFw0xNTAzMDUwMDAw
- MDBaFw0zNTAzMDQyMzU5NTlaMFIxCzAJBgNVBAYTAlVTMSEwHwYDVQQKExhHZW5l
- cmFsIEVsZWN0cmljIENvbXBhbnkxIDAeBgNVBAMTF0dFIEV4dGVybmFsIFJvb3Qg
- Q0EgMi4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzCzT4wNRZtr2
- XTzoTMjppjulZfG35/nOt44q2zg47sxwgZ8o4qjcrwzIhsntoFrRQssjXSF5qXdC
- zsm1G7f04qEBimuOH/X+CidWX+sudCS8VyRjXi9cyvUW4/mYKCLXv5M6HhEoIHCD
- Xdo6yUr5mSrf18qRR3yUFz0HYXopa2Ls3Q6lBvEUO2Xw04vqVvmg1h7S5jYuZovC
- oIbd2+4QGdoSZPgtSNpCxSR+NwtPpzYZpmqiUuDGfVpO3HU42APB0c60D91cJho6
- tZpXYHDsR/RxYGm02K/iMGefD5F4YMrtoKoHbskty6+u5FUOrUgGATJJGtxleg5X
- KotQYu8P1wIDAQABo3UwczASBgNVHRMBAf8ECDAGAQH/AgECMA4GA1UdDwEB/wQE
- AwIBBjAuBgNVHREEJzAlpCMwITEfMB0GA1UEAxMWR0UtUm9vdC1DT00tUlNBLTIw
- NDgtMTAdBgNVHQ4EFgQU3N2mUCJBCLYgtpZyxBeBMJwNZuowDQYJKoZIhvcNAQEL
- BQADggEBACF4Zsf2Nm0FpVNeADUH+sl8mFgwL7dfL7+6n7hOgH1ZXcv6pDkoNtVE
- 0J/ZPdHJW6ntedKEZuizG5BCclUH3IyYK4/4GxNpFXugmWnKGy2feYwVae7Puyd7
- /iKOFEGCYx4C6E2kq3aFjJqiq1vbgSS/B0agt1D3rH3i/+dXVxx8ZjhyZMuN+cgS
- pZL4gnhnSXFAGissxJhKsNkYgvKdOETRNn5lEgfgVyP2iOVqEguHk2Gu0gHSouLu
- 5ad/qyN+Zgbjx8vEWlywmhXb78Gaf/AwSGAwQPtmQ0310a4DulGxo/kcuS78vFH1
- mwJmHm9AIFoqBi8XpuhGmQ0nvymurEk=
- -----END CERTIFICATE-----
- - name: kubernetes
- type: spectro
- layer: k8s
- registry: Public Repo
- version: 1.21.10
- tag: 1.21.10
- values: |-
- pack:
- k8sHardening: True
- #CIDR Range for Pods in cluster
- # Note : This must not overlap with any of the host or service networks
- podCIDR: "172.30.0.0/16"
- #CIDR notation IP range from which to assign service cluster IPs
- # Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: "11.0.0.0/22"
-
- # KubeAdm customization for kubernetes hardening. Below config will be ignored if k8sHardening property above is disabled
- kubeadmconfig:
- apiServer:
- certSANs:
- - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}"
- extraArgs:
- # Note : secure-port flag is used during kubeadm init. Do not change this flag on a running cluster
- secure-port: "6443"
- anonymous-auth: "true"
- insecure-port: "0"
- profiling: "false"
- disable-admission-plugins: "AlwaysAdmit"
- default-not-ready-toleration-seconds: "60"
- default-unreachable-toleration-seconds: "60"
- enable-admission-plugins: "NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy"
- audit-log-path: /var/log/apiserver/audit.log
- audit-policy-file: /etc/kubernetes/audit-policy.yaml
- audit-log-maxage: "30"
- audit-log-maxbackup: "10"
- audit-log-maxsize: "100"
- authorization-mode: RBAC,Node
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- extraVolumes:
- - name: audit-log
- hostPath: /var/log/apiserver
- mountPath: /var/log/apiserver
- pathType: DirectoryOrCreate
- - name: audit-policy
- hostPath: /etc/kubernetes/audit-policy.yaml
- mountPath: /etc/kubernetes/audit-policy.yaml
- readOnly: true
- pathType: File
- controllerManager:
- extraArgs:
- profiling: "false"
- terminated-pod-gc-threshold: "25"
- pod-eviction-timeout: "1m0s"
- use-service-account-credentials: "true"
- feature-gates: "RotateKubeletServerCertificate=true"
- scheduler:
- extraArgs:
- profiling: "false"
- kubeletExtraArgs:
- read-only-port : "0"
- event-qps: "0"
- feature-gates: "RotateKubeletServerCertificate=true"
- protect-kernel-defaults: "true"
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- files:
- - path: hardening/audit-policy.yaml
- targetPath: /etc/kubernetes/audit-policy.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/privileged-psp.yaml
- targetPath: /etc/kubernetes/hardening/privileged-psp.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/90-kubelet.conf
- targetPath: /etc/sysctl.d/90-kubelet.conf
- targetOwner: "root:root"
- targetPermissions: "0600"
- preKubeadmCommands:
- # For enabling the 'protect-kernel-defaults' kubelet flag, kernel parameter changes are required
- - 'echo "====> Applying kernel parameters for Kubelet"'
- - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
- postKubeadmCommands:
- # Apply the privileged PodSecurityPolicy on the first master node; otherwise, CNI (and other) pods won't come up
- # Sometimes the API server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails
- - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"'
-
- # Client configuration to add OIDC based authentication flags in kubeconfig
- #clientConfig:
- #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
- #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
- #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- #oidc-extra-scope: profile,email
- - name: cni-calico
- type: spectro
- layer: cni
- registry: Public Repo
- version: 3.19.0
- tag: 3.19.0
- values: |-
- pack:
- content:
- images:
- - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0
- - image: gcr.io/spectro-images-public/calico/node:v3.19.0
- - image: gcr.io/spectro-images-public/calico/cni:v3.19.0
- - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0
-
- manifests:
- calico:
-
- # IPAM type to use. Supported types are calico-ipam, host-local
- ipamType: "calico-ipam"
-
- # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN
- encapsulationType: "CALICO_IPV4POOL_IPIP"
-
- # Should be one of Always, CrossSubnet, Never
- encapsulationMode: "Always"
-
- - name: csi-vsphere-csi
- type: spectro
- layer: csi
- registry: Public Repo
- version: 2.3.0
- tag: 2.3.0
- values: |-
- pack:
- content:
- images:
- - image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.0
- - image: quay.io/k8scsi/csi-resizer:v1.1.0
- - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.3.0
-
- manifests:
- #Storage class config
- vsphere:
-
- #Toggle for Default class
- isDefaultClass: "false"
-
- #Specifies file system type
- fstype: "ext4"
-
- #Allowed reclaim policies are Delete, Retain
- reclaimPolicy: "Delete"
-
- #Specifies the URL of the datastore on which the container volume needs to be provisioned.
- datastoreURL: ""
-
- #Specifies the storage policy for datastores on which the container volume needs to be provisioned.
- storagePolicyName: ""
-
- volumeBindingMode: "WaitForFirstConsumer"
-
- #Set this flag to true to enable volume expansion
- allowVolumeExpansion: true
-
- vsphere-cloud-controller-manager:
- k8sVersion: "{{ .spectro.system.kubernetes.version }}"
diff --git a/examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml b/examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml
deleted file mode 100644
index fd6fe75..0000000
--- a/examples/local-simplified-yaml/admin/config/profile/profile-without-creds-infra.yaml
+++ /dev/null
@@ -1,617 +0,0 @@
-name: without-creds-infra
-description: ""
-type: cluster
-cloudType: edge
-packs:
- - name: sles-edge
- type: spectro
- layer: os
- registry: Public Repo
- version: 15.3
- tag: 15.3
- values: |-
- kubeadmconfig:
- preKubeadmCommands:
- - echo "Executing pre kube admin config commands"
- - update-ca-certificates
- - 'systemctl restart containerd; sleep 3'
- - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done'
- postKubeadmCommands:
- - echo "Executing post kube admin config commands"
- files:
- - targetPath: /usr/share/pki/trust/anchors/ca.crt
- targetOwner: "root:root"
- targetPermissions: "0644"
- content: |
- -----BEGIN CERTIFICATE-----
- MIIDozCCAougAwIBAgIQeO8XlqAMLhxvtCap35yktzANBgkqhkiG9w0BAQsFADBS
- MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYR2VuZXJhbCBFbGVjdHJpYyBDb21wYW55
- MSAwHgYDVQQDExdHRSBFeHRlcm5hbCBSb290IENBIDIuMTAeFw0xNTAzMDUwMDAw
- MDBaFw0zNTAzMDQyMzU5NTlaMFIxCzAJBgNVBAYTAlVTMSEwHwYDVQQKExhHZW5l
- cmFsIEVsZWN0cmljIENvbXBhbnkxIDAeBgNVBAMTF0dFIEV4dGVybmFsIFJvb3Qg
- Q0EgMi4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzCzT4wNRZtr2
- XTzoTMjppjulZfG35/nOt44q2zg47sxwgZ8o4qjcrwzIhsntoFrRQssjXSF5qXdC
- zsm1G7f04qEBimuOH/X+CidWX+sudCS8VyRjXi9cyvUW4/mYKCLXv5M6HhEoIHCD
- Xdo6yUr5mSrf18qRR3yUFz0HYXopa2Ls3Q6lBvEUO2Xw04vqVvmg1h7S5jYuZovC
- oIbd2+4QGdoSZPgtSNpCxSR+NwtPpzYZpmqiUuDGfVpO3HU42APB0c60D91cJho6
- tZpXYHDsR/RxYGm02K/iMGefD5F4YMrtoKoHbskty6+u5FUOrUgGATJJGtxleg5X
- KotQYu8P1wIDAQABo3UwczASBgNVHRMBAf8ECDAGAQH/AgECMA4GA1UdDwEB/wQE
- AwIBBjAuBgNVHREEJzAlpCMwITEfMB0GA1UEAxMWR0UtUm9vdC1DT00tUlNBLTIw
- NDgtMTAdBgNVHQ4EFgQU3N2mUCJBCLYgtpZyxBeBMJwNZuowDQYJKoZIhvcNAQEL
- BQADggEBACF4Zsf2Nm0FpVNeADUH+sl8mFgwL7dfL7+6n7hOgH1ZXcv6pDkoNtVE
- 0J/ZPdHJW6ntedKEZuizG5BCclUH3IyYK4/4GxNpFXugmWnKGy2feYwVae7Puyd7
- /iKOFEGCYx4C6E2kq3aFjJqiq1vbgSS/B0agt1D3rH3i/+dXVxx8ZjhyZMuN+cgS
- pZL4gnhnSXFAGissxJhKsNkYgvKdOETRNn5lEgfgVyP2iOVqEguHk2Gu0gHSouLu
- 5ad/qyN+Zgbjx8vEWlywmhXb78Gaf/AwSGAwQPtmQ0310a4DulGxo/kcuS78vFH1
- mwJmHm9AIFoqBi8XpuhGmQ0nvymurEk=
- -----END CERTIFICATE-----
- - name: kubernetes
- type: spectro
- layer: k8s
- registry: Public Repo
- version: 1.21.10
- tag: 1.21.10
- values: |-
- pack:
- k8sHardening: True
- #CIDR Range for Pods in cluster
- # Note : This must not overlap with any of the host or service networks
- podCIDR: "172.10.0.0/16"
- #CIDR notation IP range from which to assign service cluster IPs
- # Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: "11.0.0.0/22"
- content:
- images:
- - image: gcr.io/spectro-images-public/release/edge/node:s-153-0-k-12110-0
-
- # KubeAdm customization for kubernetes hardening. Below config will be ignored if k8sHardening property above is disabled
- kubeadmconfig:
- apiServer:
- certSANs:
- - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}"
- extraArgs:
- # Note : secure-port flag is used during kubeadm init. Do not change this flag on a running cluster
- secure-port: "6443"
- anonymous-auth: "true"
- insecure-port: "0"
- profiling: "false"
- disable-admission-plugins: "AlwaysAdmit"
- default-not-ready-toleration-seconds: "60"
- default-unreachable-toleration-seconds: "60"
- enable-admission-plugins: "NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy"
- audit-log-path: /var/log/apiserver/audit.log
- audit-policy-file: /etc/kubernetes/audit-policy.yaml
- audit-log-maxage: "30"
- audit-log-maxbackup: "10"
- audit-log-maxsize: "100"
- authorization-mode: RBAC,Node
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- extraVolumes:
- - name: audit-log
- hostPath: /var/log/apiserver
- mountPath: /var/log/apiserver
- pathType: DirectoryOrCreate
- - name: audit-policy
- hostPath: /etc/kubernetes/audit-policy.yaml
- mountPath: /etc/kubernetes/audit-policy.yaml
- readOnly: true
- pathType: File
- controllerManager:
- extraArgs:
- profiling: "false"
- terminated-pod-gc-threshold: "25"
- pod-eviction-timeout: "1m0s"
- use-service-account-credentials: "true"
- feature-gates: "RotateKubeletServerCertificate=true"
- scheduler:
- extraArgs:
- profiling: "false"
- kubeletExtraArgs:
- read-only-port : "0"
- event-qps: "0"
- feature-gates: "RotateKubeletServerCertificate=true"
- protect-kernel-defaults: "true"
- tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
- files:
- - path: hardening/audit-policy.yaml
- targetPath: /etc/kubernetes/audit-policy.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/privileged-psp.yaml
- targetPath: /etc/kubernetes/hardening/privileged-psp.yaml
- targetOwner: "root:root"
- targetPermissions: "0600"
- - path: hardening/90-kubelet.conf
- targetPath: /etc/sysctl.d/90-kubelet.conf
- targetOwner: "root:root"
- targetPermissions: "0600"
- preKubeadmCommands:
- # For enabling the 'protect-kernel-defaults' kubelet flag, kernel parameter changes are required
- - 'echo "====> Applying kernel parameters for Kubelet"'
- - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
- postKubeadmCommands:
- # Apply the privileged PodSecurityPolicy on the first master node; otherwise, CNI (and other) pods won't come up
- # Sometimes the API server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails
- - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"'
-
- # Client configuration to add OIDC based authentication flags in kubeconfig
- #clientConfig:
- #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
- #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
- #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- #oidc-extra-scope: profile,email
- - name: cni-calico
- type: spectro
- layer: cni
- registry: Public Repo
- version: 3.19.0
- tag: 3.19.0
- values: |-
- pack:
- content:
- images:
- - image: gcr.io/spectro-images-public/calico/kube-controllers:v3.19.0
- - image: gcr.io/spectro-images-public/calico/node:v3.19.0
- - image: gcr.io/spectro-images-public/calico/cni:v3.19.0
- - image: gcr.io/spectro-images-public/calico/pod2daemon-flexvol:v3.19.0
-
- manifests:
- calico:
-
- # IPAM type to use. Supported types are calico-ipam, host-local
- ipamType: "calico-ipam"
-
- # Should be one of CALICO_IPV4POOL_IPIP or CALICO_IPV4POOL_VXLAN
- encapsulationType: "CALICO_IPV4POOL_IPIP"
-
- # Should be one of Always, CrossSubnet, Never
- encapsulationMode: "Always"
- - name: csi-rook-ceph
- type: spectro
- layer: csi
- registry: Public Repo
- version: 1.8.0
- tag: 1.8.0
- values: |-
- pack:
- content:
- images:
- - image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
- - image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
- - image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
- - image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
- - image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
- - image: quay.io/cephcsi/cephcsi:v3.4.0
- - image: quay.io/ceph/ceph:v16.2.7
- - image: docker.io/rook/ceph:v1.8.0
-
-
- manifests:
- storageclass:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephFilesystem
- metadata:
- name: myfs
- namespace: rook-ceph # namespace:cluster
- spec:
- # The metadata pool spec. Must use replication.
- metadataPool:
- replicated:
- size: 3
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # The list of data pool specs. Can use replication or erasure coding.
- dataPools:
- - name: replicated
- failureDomain: host
- replicated:
- size: 3
- # Disallow setting pool with replica 1; this could lead to data loss without recovery.
- # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
- requireSafeReplicaSize: true
- parameters:
- # Inline compression mode for the data pool
- # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
- compression_mode:
- none
- # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
- # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
- #target_size_ratio: ".5"
- # Whether to preserve filesystem after CephFilesystem CRD deletion
- preserveFilesystemOnDelete: true
- # The metadata service (mds) configuration
- metadataServer:
- # The number of active MDS instances
- activeCount: 1
- # Whether each active MDS instance will have an active standby with a warm metadata cache for faster failover.
- # If false, standbys will be available, but will not have a warm cache.
- activeStandby: true
- # The affinity rules to apply to the mds deployment
- placement:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - mds-node
- # topologySpreadConstraints:
- # tolerations:
- # - key: mds-node
- # operator: Exists
- # podAffinity:
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: kubernetes.io/hostname will place MDS across different hosts
- topologyKey: kubernetes.io/hostname
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - rook-ceph-mds
- # topologyKey: */zone can be used to spread MDS across different AZ
- # Use failure-domain.beta.kubernetes.io/zone if your k8s cluster is v1.16 or lower
- # Use topology.kubernetes.io/zone if your k8s cluster is v1.17 or higher
- topologyKey: topology.kubernetes.io/zone
- # A key/value list of annotations
- annotations:
- # key: value
- # A key/value list of labels
- labels:
- # key: value
- resources:
- # The requests and limits set here allow the filesystem MDS Pod(s) to use half of one CPU core and 1 gigabyte of memory
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # priorityClassName: my-priority-class
- # Filesystem mirroring settings
- # mirroring:
- # enabled: true
- # list of Kubernetes Secrets containing the peer token
- # for more details see: https://docs.ceph.com/en/latest/dev/cephfs-mirroring/#bootstrap-peers
- # peers:
- #secretNames:
- #- secondary-cluster-peer
- # specify the schedule(s) on which snapshots should be taken
- # see the official syntax here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-schedules
- # snapshotSchedules:
- # - path: /
- # interval: 24h # daily snapshots
- # startTime: 11:55
- # manage retention policies
- # see syntax duration here https://docs.ceph.com/en/latest/cephfs/snap-schedule/#add-and-remove-retention-policies
- # snapshotRetention:
- # - path: /
- # duration: "h 24"
- ---
- apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
- name: standard
- annotations:
- storageclass.kubernetes.io/is-default-class: "true"
- # Change "rook-ceph" provisioner prefix to match the operator namespace if needed
- provisioner: rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator
- parameters:
- # clusterID is the namespace where the rook cluster is running
- # If you change this namespace, also change the namespace below where the secret namespaces are defined
- clusterID: rook-ceph # namespace:cluster
-
- # CephFS filesystem name into which the volume shall be created
- fsName: myfs
-
- # Ceph pool into which the volume shall be created
- # Required for provisionVolume: "true"
- pool: myfs-data0
-
- # The secrets contain Ceph admin credentials. These are generated automatically by the operator
- # in the same namespace as the cluster.
- csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
- csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph # namespace:cluster
- csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
- csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph # namespace:cluster
-
- # (optional) The driver can use either ceph-fuse (fuse) or ceph kernel client (kernel)
- # If omitted, default volume mounter will be used - this is determined by probing for ceph-fuse
- # or by setting the default mounter explicitly via --volumemounter command-line argument.
- # mounter: kernel
- reclaimPolicy: Delete
- allowVolumeExpansion: true
- #Supported binding modes are Immediate, WaitForFirstConsumer
- volumeBindingMode: "WaitForFirstConsumer"
- mountOptions:
- # uncomment the following line for debugging
- #- debug
-
- cluster:
- contents: |
- apiVersion: ceph.rook.io/v1
- kind: CephCluster
- metadata:
- name: rook-ceph
- namespace: rook-ceph # namespace:cluster
- spec:
- cephVersion:
- # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v15 is octopus, and v16 is pacific.
- # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
- # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
- # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v16.2.7-20211208
- # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
- image: quay.io/ceph/ceph:v16.2.7
- # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
- # Future versions such as `quincy` would require this to be set to `true`.
- # Do not set to true in production.
- allowUnsupported: false
- # The path on the host where configuration files will be persisted. Must be specified.
- # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
- # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
- dataDirHostPath: /var/lib/rook
- # Whether or not upgrade should continue even if a check fails
- # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
- # Use at your OWN risk
- # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
- skipUpgradeChecks: false
- # Whether or not to continue if PGs are not clean during an upgrade
- continueUpgradeAfterChecksEvenIfNotHealthy: false
- # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
- # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
- # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
- # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
- # The default wait timeout is 10 minutes.
- waitTimeoutForHealthyOSDInMinutes: 10
- mon:
- # Set the number of mons to be started. Generally recommended to be 3.
- # For highest availability, an odd number of mons should be specified.
- count: 3
- # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
- # Mons should only be allowed on the same node for test environments where data loss is acceptable.
- allowMultiplePerNode: false
- mgr:
- # When higher availability of the mgr is needed, increase the count to 2.
- # In that case, one mgr will be active and one in standby. When Ceph updates which
- # mgr is active, Rook will update the mgr services to match the active mgr.
- count: 1
- modules:
- # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
- # are already enabled by other settings in the cluster CR.
- - name: pg_autoscaler
- enabled: true
- # enable the ceph dashboard for viewing cluster status
- dashboard:
- enabled: true
- # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
- # urlPrefix: /ceph-dashboard
- # serve the dashboard at the given port.
- # port: 8443
- # serve the dashboard using SSL
- ssl: true
- # enable prometheus alerting for cluster
- monitoring:
- # requires Prometheus to be pre-installed
- enabled: false
- # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
- # Recommended:
- # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
- # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
- # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
- rulesNamespace: rook-ceph
- network:
- # enable host networking
- #provider: host
- # enable the Multus network provider
- #provider: multus
- #selectors:
- # The selector keys are required to be `public` and `cluster`.
- # Based on the configuration, the operator will do the following:
- # 1. if only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface
- # 2. if both `public` and `cluster` selector keys are specified, the first one maps to the 'public_network' setting and the second one to 'cluster_network'
- #
- # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
- #
- #public: public-conf --> NetworkAttachmentDefinition object name in Multus
- #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
- # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
- #ipFamily: "IPv6"
- # Ceph daemons to listen on both IPv4 and IPv6 networks
- #dualStack: false
- # enable the crash collector for ceph daemon crash collection
- crashCollector:
- disable: false
- # Uncomment daysToRetain to prune ceph crash entries older than the
- # specified number of days.
- #daysToRetain: 30
- # enable the log collector; daemons will log to files and rotate them
- # logCollector:
- # enabled: true
- # periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
- # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
- cleanupPolicy:
- # Since cluster cleanup is destructive to data, confirmation is required.
- # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
- # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
- # Rook will immediately stop configuring the cluster and only wait for the delete command.
- # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
- confirmation: ""
- # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
- sanitizeDisks:
- # method indicates whether the entire disk should be sanitized or only ceph's metadata
- # in both cases, re-install is possible
- # possible choices are 'complete' or 'quick' (default)
- method: quick
- # dataSource indicates where to get random bytes from to write on the disk
- # possible choices are 'zero' (default) or 'random'
- # using random sources will consume entropy from the system and will take much more time than the zero source
- dataSource: zero
- # iteration overwrites N times instead of the default (1)
- # takes an integer value
- iteration: 1
- # allowUninstallWithVolumes defines how the uninstall should be performed
- # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
- allowUninstallWithVolumes: false
- # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
- # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
- # tolerate taints with a key of 'storage-node'.
- # placement:
- # all:
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - storage-node
- # podAffinity:
- # podAntiAffinity:
- # topologySpreadConstraints:
- # tolerations:
- # - key: storage-node
- # operator: Exists
- # The above placement information can also be specified for mon, osd, and mgr components
- # mon:
- # Monitor deployments may contain an anti-affinity rule for avoiding monitor
- # collocation on the same node. This is a required rule when host network is used
- # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
- # preferred rule with weight: 50.
- # osd:
- # mgr:
- # cleanup:
- annotations:
- # all:
- # mon:
- # osd:
- # cleanup:
- # prepareosd:
- # If no mgr annotations are set, prometheus scrape annotations will be set by default.
- # mgr:
- labels:
- # all:
- # mon:
- # osd:
- # cleanup:
- # mgr:
- # prepareosd:
- # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by the operator.
- # These labels can be passed as a LabelSelector to Prometheus
- # monitoring:
- # crashcollector:
- resources:
- # The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
- # mgr:
- # limits:
- # cpu: "500m"
- # memory: "1024Mi"
- # requests:
- # cpu: "500m"
- # memory: "1024Mi"
- # The above example requests/limits can also be added to the other components
- # mon:
- # osd:
- # For OSDs it is also possible to specify requests/limits based on device class
- # osd-hdd:
- # osd-ssd:
- # osd-nvme:
- # prepareosd:
- # mgr-sidecar:
- # crashcollector:
- # logcollector:
- # cleanup:
- # The option to automatically remove OSDs that are out and are safe to destroy.
- removeOSDsIfOutAndSafeToRemove: true
- # priorityClassNames:
- # all: rook-ceph-default-priority-class
- # mon: rook-ceph-mon-priority-class
- # osd: rook-ceph-osd-priority-class
- # mgr: rook-ceph-mgr-priority-class
- storage: # cluster level storage configuration and selection
- useAllNodes: true
- useAllDevices: false
- deviceFilter: ^sd[b-d]
- config:
- # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
- # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
- # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
- # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
- # osdsPerDevice: "1" # this value can be overridden at the node or device level
- # encryptedDevice: "true" # the default value for this option is "false"
- # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
- # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
- # nodes:
- # - name: "172.17.4.201"
- # devices: # specific devices to use for storage can be specified for each node
- # - name: "sdb"
- # - name: "nvme01" # multiple osds can be created on high performance devices
- # config:
- # osdsPerDevice: "5"
- # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
- # config: # configuration can be specified at the node level which overrides the cluster level config
- # - name: "172.17.4.301"
- # deviceFilter: "^sd."
- # when onlyApplyOSDPlacement is false, the operator will merge both placement.All() and placement.osd
- onlyApplyOSDPlacement: false
- # The section for configuring management of daemon disruptions during upgrade or fencing.
- disruptionManagement:
- # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
- # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
- # block eviction of OSDs by default and unblock them safely when drains are detected.
- managePodBudgets: true
- # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
- # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
- osdMaintenanceTimeout: 30
- # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
- # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
- # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
- pgHealthCheckTimeout: 0
- # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
- # Only available on OpenShift.
- manageMachineDisruptionBudgets: false
- # Namespace in which to watch for the MachineDisruptionBudgets.
- machineDisruptionBudgetNamespace: openshift-machine-api
-
- # healthChecks
- # Valid values for daemons are 'mon', 'osd', 'status'
- healthCheck:
- daemonHealth:
- mon:
- disabled: false
- interval: 45s
- osd:
- disabled: false
- interval: 60s
- status:
- disabled: false
- interval: 60s
- # Change the pod liveness probe; it works for all mon, mgr, and osd daemons
- livenessProbe:
- mon:
- disabled: false
- mgr:
- disabled: false
- osd:
- disabled: false
diff --git a/examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml b/examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml
deleted file mode 100644
index 91798f1..0000000
--- a/examples/local-simplified-yaml/admin/config/project/project-hospital-200.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-name: hospital-200
-description: "project for hospital 200"
diff --git a/examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml b/examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml
deleted file mode 100644
index 3e5f03c..0000000
--- a/examples/local-simplified-yaml/admin/config/project/project-hospital-201.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-name: hospital-201
-description: "project for hospital 201"
diff --git a/examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml b/examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml
deleted file mode 100644
index 92f16e6..0000000
--- a/examples/local-simplified-yaml/admin/config/project/project-hospital-202.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-name: hospital-202
-description: "project for hospital 202"
diff --git a/examples/local-simplified-yaml/admin/gitlab_project.tf b/examples/local-simplified-yaml/admin/gitlab_project.tf
deleted file mode 100644
index 7582adc..0000000
--- a/examples/local-simplified-yaml/admin/gitlab_project.tf
+++ /dev/null
@@ -1,62 +0,0 @@
-#locals {
-# gitlab_project_ids = {
-# for k, v in gitlab_project.this :
-# v.name => v.id
-# }
-#}
-#
-#resource "gitlab_project" "this" {
-# for_each = local.projects
-#
-# name = each.value.name
-# description = each.value.description
-# visibility_level = "public" # or 'private'
-# pipelines_enabled = true
-# shared_runners_enabled = true # shared runners means runners from different projects can be used
-# import_url = each.value.import_url
-#}
-#
-#resource "gitlab_project_variable" "host" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_HOST_DEV"
-# value = var.sc_host
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "username" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_USERNAME_DEV"
-# value = var.sc_username
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "password" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_PASSWORD_DEV"
-# value = var.sc_password
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "project" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "SC_PROJECT_DEV"
-# value = each.value.name
-# protected = false
-#}
-#
-#resource "gitlab_project_variable" "statekey" {
-# for_each = local.projects
-#
-# project = local.gitlab_project_ids[each.value.name]
-# key = "PROJECT_TF_STATE"
-# value = each.value.name
-# protected = false
-#}
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/admin/main.tf b/examples/local-simplified-yaml/admin/main.tf
deleted file mode 100644
index 102662c..0000000
--- a/examples/local-simplified-yaml/admin/main.tf
+++ /dev/null
@@ -1,55 +0,0 @@
-terraform {
- required_version = ">= 0.14.0"
-
- required_providers {
- spectrocloud = {
- version = "= 0.6.10-pre"
- source = "spectrocloud/spectrocloud"
- }
-
- # gitlab = {
- # source = "gitlabhq/gitlab"
- # version = "3.6.0"
- # }
- }
-}
-
-variable "sc_host" {}
-variable "sc_api_key" {
- sensitive = true
-}
-
-provider "spectrocloud" {
- host = var.sc_host
- api_key = var.sc_api_key
- project_name = ""
-}
-
-#variable "gitlab_token" {}
-#
-#provider "gitlab" {
-# token = var.gitlab_token
-#}
-
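-# Convention: each config/project/project-*.yaml and config/profile/profile-*.yaml file
-# below becomes one map entry keyed by its filename without the .yaml suffix.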
-locals {
- projects = {
- for k in fileset("config/project", "project-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/project/${k}"))
- }
-
- profiles = {
- for k in fileset("config/profile", "profile-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}"))
- }
-}
-
-module "Spectro" {
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- projects = local.projects
- profiles = local.profiles
-}
diff --git a/examples/local-simplified-yaml/admin/terraform.template.tfvars b/examples/local-simplified-yaml/admin/terraform.template.tfvars
deleted file mode 100644
index 9822bc4..0000000
--- a/examples/local-simplified-yaml/admin/terraform.template.tfvars
+++ /dev/null
@@ -1,5 +0,0 @@
-# Credentials
-sc_host = "{enter Spectro Cloud host, blank for SaaS}"
-sc_api_key = "{enter Spectro Cloud API Key}"
-
-#gitlab_token = "{enter Gitlab access token}"
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-bm-200/README.md b/examples/local-simplified-yaml/project-bm-200/README.md
deleted file mode 100644
index a81c37d..0000000
--- a/examples/local-simplified-yaml/project-bm-200/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Bare metal appliance project
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml b/examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml
deleted file mode 100644
index 55a96b8..0000000
--- a/examples/local-simplified-yaml/project-bm-200/config/appliance/appliance-hospital-200.yaml
+++ /dev/null
@@ -1 +0,0 @@
-id: "hospital-200"
diff --git a/examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml b/examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml
deleted file mode 100644
index 66b389b..0000000
--- a/examples/local-simplified-yaml/project-bm-200/config/cluster/cluster-hospital-200.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: hospital-200
-cloudType: libvirt
-tags:
-- "skip_completion"
-profiles:
- infra:
- name: bm-infra
- system:
- name: hospital-200-system-profile
- addons:
- - name: spectro-core
-cloud_config:
- ssh_key: "ssh-rsa AADS0v57QKjb1o9ubvvATQNg2T3x+inckfzfgX3et1H9X1oSp0FqY1+Mmy5nfTtTyIj5GetePsqa+MgCjnLfCBiOzmZSRgWEre//IipNzMnnZC7El5KJCQn8ksF+DYY9eT9NtNFEMALTZC6hn8BnMc14zqxoJP/GNHftmig8TJC500Uofdr4OKTCRr1JwHS79Cx9LyZdAp/1D8mL6bIMyGOTPVQ8xUpmEYj77m1kdiCHCk22YtLyfUWuQ0SC+2p1soDoNfJUpmxcKboOTZsLq1HDCFrqSyLUWS1PrYZ/MzhsPrsDewB1iHLbYDt87r2odJOpxMO1vNWMOYontODdr5JPKBpCcd/noNyOy/m4Spntytfb/J3kM1oz3dpPfN0xXmC19uR1xHklmbtg1j784IMu7umI2ZCpUwLADAodkbxmbacdkp5I+1NFgrFamvnTjjQAvRexV31m4m9GielKFQ4tCCId2yagMBWRFn5taEhb3SKnRxBcAzaJLopUyErOtqxvSywGvb53v4MEShqBaQSUv4gHfw== spectro2022"
- vip: 192.168.100.15
- ntp_servers: ["ntp.ge.com"]
-node_groups:
- - name: master-pool
- control_plane: true
- control_plane_as_worker: true
- count: 1
- disk_size_gb: 60
- memory_mb: 8192
- cpu: 2
- placements:
- - appliance: "hospital-200"
- network_type: "bridge"
- network_names: "br0"
- network: "br"
- image_storage_pool: "ehl_images"
- target_storage_pool: "ehl_images"
- data_storage_pool: "ehl_data"
-
- - name: worker-pool
- count: 3
- disk_size_gb: 60
- memory_mb: 8192
- cpu: 4
- attached_disks:
- - size_in_gb: 30
- managed: true
- - size_in_gb: 10
- managed: true
- placements:
- - appliance: "hospital-200"
- network_type: "bridge"
- network_names: "br0"
- network: "br"
- image_storage_pool: "ehl_images"
- target_storage_pool: "ehl_images"
- data_storage_pool: "ehl_data"
diff --git a/examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml b/examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml
deleted file mode 100644
index 89ba3eb..0000000
--- a/examples/local-simplified-yaml/project-bm-200/config/profile/profile-system.yaml
+++ /dev/null
@@ -1,1152 +0,0 @@
-name: hospital-200-system-profile
-description: system-profile
-type: system
-cloudType: all
-packs:
- - name: "pfsense-gateway"
- type: manifest
- registry: Public Repo
- manifests:
- - name: pfsense-gateway-config
- content: |
- ---
- apiVersion: v1
- data:
- user-data.tmpl: |
- #cloud-config
- write_files:
- - encoding: base64
- content: ${CONFIG_XML}
- owner: root:root
- path: /cf/conf/config.xml
- permissions: '0644'
- config-xml-static.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- ${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- ${DNS_1}
- ${DNS_2}
-
-
-
-
-
- vtnet1
-
-
-
- 32
- ${IP_ADDR_WAN}
- ${SUBNET_WAN}
- WANGW
-
-
-
- vtnet0
-
-
- ${IP_ADDR_LAN}
- ${SUBNET_LAN}
-
-
-
-
-
-
-
- ${DHCP_RANGE_START}
- ${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
- WANGW
-
- wan
- ${IP_GATEWAY_WAN}
- WANGW
-
- inet
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- config-xml-dhcp.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- ${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- ${DNS_1}
- ${DNS_2}
-
-
-
-
-
- vtnet1
-
- ${IP_ADDR_WAN}
-
-
- 32
-
-
-
-
-
-
-
- SavedCfg
-
-
-
-
-
-
-
- dhcp6
-
- 0
- wan
-
-
-
-
- vtnet0
-
-
- ${IP_ADDR_LAN}
- ${SUBNET_LAN}
-
-
-
-
-
-
-
- ${DHCP_RANGE_START}
- ${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- kind: ConfigMap
- metadata:
- name: pfsense-vm-files
- ---
- apiVersion: v1
- data:
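- # decodes to: URI_TEMPLATE: "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"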
- credentials: VVJJX1RFTVBMQVRFOiAicWVtdStzc2g6Ly8lc0Alcy9zeXN0ZW0/a2V5ZmlsZT0lcyZzb2NrZXQ9JXMma25vd25faG9zdHNfdmVyaWZ5PWlnbm9yZSI=
- kind: Secret
- metadata:
- name: libvirt-account-creds
- type: Opaque
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Provider
- metadata:
- name: libvirt
- spec:
- provider: custom
- credentials:
- source: Secret
- secretRef:
- name: libvirt-account-creds
- key: credentials
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Configuration
- metadata:
- name: pfsense-gateway-vm-config
- spec:
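- # Run the Terraform job with the host's libvirt socket and the Spectro image directory mounted into the pod.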
- volumeSpec:
- volumeMounts:
- - name: libvirt
- mountPath: /var/run/libvirt/libvirt-sock
- - name: spectro-directory
- mountPath: /opt/spectrocloud
- - mountPath: "/var/files"
- name: files-vol
- volumes:
- - name: files-vol
- configMap:
- name: pfsense-vm-files
- - name: libvirt
- hostPath:
- path: /var/run/libvirt/libvirt-sock
- type: Socket
- - name: spectro-directory
- hostPath:
- path: /opt/spectrocloud
- type: Directory
- deleteResource: true
- variable:
- VM_NAME: "pfsense-gateway-vm"
- NTP: FROM_SECRET_REF
- DNS_1: FROM_SECRET_REF
- DNS_2: FROM_SECRET_REF
- IP_ADDR_WAN: FROM_SECRET_REF
- IP_GATEWAY_WAN: FROM_SECRET_REF
- SUBNET_WAN: FROM_SECRET_REF
- IP_ADDR_LAN: FROM_SECRET_REF
- SUBNET_LAN: FROM_SECRET_REF
- DHCP_RANGE_START: FROM_SECRET_REF
- DHCP_RANGE_END: FROM_SECRET_REF
- providerRef:
- name: libvirt
- hcl: |
- terraform {
- required_version = ">= 0.13"
- required_providers {
- libvirt = {
- source = "dmacvicar/libvirt"
- version = "0.6.14"
- }
- }
- }
-
- locals {
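- # try() guards the comparison; the literal value "dhcp" in IP_ADDR_WAN selects the DHCP WAN template, anything else the static one.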
- config_file = try(var.IP_ADDR_WAN == "dhcp", false) ? "/var/files/config-xml-dhcp.tmpl" : "/var/files/config-xml-static.tmpl"
-
- dhcp_param_map = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
-
- static_param_map = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- SUBNET_WAN = var.SUBNET_WAN
- IP_GATEWAY_WAN = var.IP_GATEWAY_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
- }
-
- ##### VARIABLES #####
- variable "URI_TEMPLATE" {
- type = string
- default = "qemu+ssh://%s@%s/system?keyfile=%s&socket=%s&known_hosts_verify=ignore"
- }
-
- variable "NTP" {
- type = string
- default = "ntp.ge.com ntp1.ge.com"
- }
-
- variable "DNS_1" {
- type = string
- default = "10.220.220.220"
- }
-
- variable "DNS_2" {
- type = string
- default = "10.220.220.221"
- }
-
- variable "IP_ADDR_WAN" {
- type = string
- default = "dhcp"
- }
-
- variable "SUBNET_WAN" {
- type = string
- default = "23"
- }
-
- variable "IP_GATEWAY_WAN" {
- type = string
- default = ""
- }
-
- variable "IP_ADDR_LAN" {
- type = string
- default = "192.168.100.1"
- }
-
- variable "SUBNET_LAN" {
- type = string
- default = "24"
- }
-
- variable "DHCP_RANGE_START" {
- type = string
- default = "192.168.100.50"
- }
-
- variable "DHCP_RANGE_END" {
- type = string
- default = "192.168.100.250"
- }
-
- variable "LIBVIRT_SOCKET" {
- type = string
- default = "/var/run/libvirt/libvirt-sock"
- }
-
- variable "VM_NAME" {
- type = string
- default = "pfsense-terraform"
- }
-
- ##### OUTPUT #####
- output "config_value" {
- value = data.template_file.config.rendered
- }
-
- output "config_file" {
- value = local.config_file
- }
-
- output "config_params" {
- value = try(var.IP_ADDR_WAN == "dhcp", false) ? local.dhcp_param_map : local.static_param_map
- }
-
- ##### PROVIDER #####
- provider "libvirt" {
- uri = "qemu:///system"
- #uri = format(var.URI_TEMPLATE, var.SSH_USER, var.HOST_IP, var.SSH_KEY, var.LIBVIRT_SOCKET)
- }
-
- data "template_file" "config" {
- template = file(local.config_file)
- vars = try(var.IP_ADDR_WAN == "dhcp", false) ? local.dhcp_param_map : local.static_param_map
- }
-
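- # Render the cloud-init user-data with the chosen config.xml base64-embedded; cloud-init writes it to /cf/conf/config.xml (see user-data.tmpl).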
- data "template_file" "user_data" {
- template = file("/var/files/user-data.tmpl")
- vars = {
- CONFIG_XML = base64encode(data.template_file.config.rendered)
- }
- }
-
- resource "libvirt_pool" "ubuntu" {
- name = "ubuntuop"
- type = "dir"
- path = "/var/lib/libvirt/terraform-provider-libvirt-pool-ubuntuoperator"
- }
-
- resource "libvirt_volume" "ubuntu-qcow2" {
- name = "ubuntu-qcow2"
- pool = libvirt_pool.ubuntu.name
- source = "/opt/spectrocloud/vm-operator/ehl-system-gateway.qcow2"
- format = "qcow2"
- }
-
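- # Package the rendered user-data as a cloud-init ISO for the pfSense VM defined below.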
- resource "libvirt_cloudinit_disk" "commoninit" {
- name = "commoninit.iso"
- user_data = data.template_file.user_data.rendered
- pool = libvirt_pool.ubuntu.name
- }
-
- resource "libvirt_domain" "domain-ubuntu" {
- name = var.VM_NAME
- memory = "2048"
- vcpu = 1
-
- cloudinit = libvirt_cloudinit_disk.commoninit.id
-
- network_interface {
- bridge = "br0"
- }
-
- network_interface {
- bridge = "br1"
- }
-
- console {
- type = "pty"
- target_port = "0"
- target_type = "serial"
- }
-
- disk {
- volume_id = libvirt_volume.ubuntu-qcow2.id
- }
-
- graphics {
- type = "vnc"
- listen_type = "address"
- listen_address = "0.0.0.0"
- autoport = true
- }
- }
-
diff --git a/examples/local-simplified-yaml/project-bm-200/main.tf b/examples/local-simplified-yaml/project-bm-200/main.tf
deleted file mode 100644
index 3a7298c..0000000
--- a/examples/local-simplified-yaml/project-bm-200/main.tf
+++ /dev/null
@@ -1,71 +0,0 @@
-terraform {
- required_version = ">= 0.14.0"
-
- required_providers {
- spectrocloud = {
- version = "= 0.6.10-pre"
- source = "spectrocloud/spectrocloud"
- }
- }
-}
-
-variable "sc_host" {}
-variable "sc_api_key" {
- sensitive = true
-}
-variable "sc_project_name" {}
-
-provider "spectrocloud" {
- host = var.sc_host
- api_key = var.sc_api_key
- project_name = var.sc_project_name
-}
-
-locals {
- profiles = {
- for k in fileset("config/profile", "profile-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}"))
- }
-
- appliances = {
- for k in fileset("config/appliance", "appliance-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}"))
- }
-
- clusters = {
- for k in fileset("config/cluster", "cluster-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}"))
- }
-}
-
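-# The three modules below are chained with depends_on: profiles are created
-# first, then appliances, then the clusters that reference both.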
-module "SpectroSystemProfile" {
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- profiles = local.profiles
-}
-
-module "SpectroAppliance" {
- depends_on = [module.SpectroSystemProfile]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- appliances = local.appliances
-}
-
-module "SpectroCluster" {
- depends_on = [module.SpectroAppliance]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- clusters = local.clusters
-}
diff --git a/examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars b/examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars
deleted file mode 100644
index 3c4423e..0000000
--- a/examples/local-simplified-yaml/project-bm-200/terraform.template.tfvars
+++ /dev/null
@@ -1,4 +0,0 @@
-# Credentials
-sc_host = "{enter Spectro Cloud host, blank for SaaS}"
-sc_api_key = "{enter Spectro Cloud API Key}"
-sc_project_name = "{enter Spectro Cloud Project Name}"
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md b/examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md
deleted file mode 100644
index 73757e1..0000000
--- a/examples/local-simplified-yaml/project-vsphere-with-creds-201/README.md
+++ /dev/null
@@ -1 +0,0 @@
-vSphere with creds appliance project
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml
deleted file mode 100644
index 515e47e..0000000
--- a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/appliance/appliance-hospital-201.yaml
+++ /dev/null
@@ -1 +0,0 @@
-id: "hospital-201"
diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml
deleted file mode 100644
index 32b51de..0000000
--- a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/cluster/cluster-hospital-201.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: hospital-201
-cloudType: edge-vsphere
-edge_host_uid: hospital-201
-tags:
-- "skip_completion"
-profiles:
- infra:
- name: vsphere-with-creds-infra
- system:
- name: hospital-201-system-profile
- addons:
- - name: spectro-core
-cloud_config:
- ssh_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCr3hE9IS5UUDPqNOiEWVJvVDS0v57QKjb1o9ubvvATQNg2T3x+inckfzfgX3et1H9X1oSp0FqY1+Mmy5nfTtTyIj5Get1cntcC4QqdZV8Op1tmpI01hYMj4lLn55WNaXgDt+35tJ47kWRr5RqTGV05MPNWN3klaVsePsqa+MgCjnLfCBiOz1tpBOgxqPNqtQPXh+/T/Ul6ZDUW/rySr9iNR9uGd04tYzD7wdTdvmZSRgWEre//IipNzMnnZC7El5KJCQn8ksF+DYY9eT9NtNFEMALTZC6hn8BnMc14zqxoJP/GNHftmig8TJC500Uofdr4OKTCRr1JwHS79Cx9LyZdAp/1D8mL6bIMyGOTPVQ8xUpmEYj77m1kdiCHCk22YtLyfUWuQ0SC+2p1soDoNfJUpmxcKboOTZsLq1HDCFrqSyLUWS1PrYZ/MzhsPrsDewB1iHLbYDt87r2odJOpxMO1vNWMOYontODdr5JPKBpCcd/noNyOy/m4Spntytfb/J3kM1oz3dpPfN0xXmC19uR1xHklmbtg1j784IMu7umI2ZCpUwLADAodkbxmbacdkp5I+1NFgrFamvnTjjQAvRexV31m4m9GielKFQ4tCCId2yagMBWRFn5taEhb3SKnRxBcAzaJLopUyErOtqxvSywGvb53v4MEShqBaQSUv4gHfw== spectro2022"
- vip: 192.168.100.15
- static_ip: false
- network_type: VIP
- datacenter: "Spectrocloud"
- folder: "demo-creds-1"
-node_groups:
- - name: master-pool
- control_plane: true
- control_plane_as_worker: true
- count: 1
- disk_size_gb: 60
- memory_mb: 8192
- cpu: 2
- placement:
- cluster: "Spectrocloud"
- resource_pool: "Resources"
- datastore: "VOL_SC_CMP1_02"
- network: "Spectro-Pvt-Net05"
-
- - name: worker-pool
- count: 3
- disk_size_gb: 60
- memory_mb: 8192
- cpu: 4
- placement:
- cluster: "Spectrocloud"
- resource_pool: "Resources"
- datastore: "VOL_SC_CMP1_02"
- network: "Spectro-Pvt-Net05"
-
diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml b/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml
deleted file mode 100644
index 34c256b..0000000
--- a/examples/local-simplified-yaml/project-vsphere-with-creds-201/config/profile/profile-system.yaml
+++ /dev/null
@@ -1,751 +0,0 @@
-name: hospital-201-system-profile
-description: system-profile
-type: system
-cloudType: all
-packs:
- - name: "pfsense-gateway"
- type: manifest
- registry: Public Repo
- manifests:
- - name: pfsense-config
- content: |
- apiVersion: v1
- data:
- user-data.tmpl: |
- #cloud-config
- write_files:
- - encoding: base64
- content: ${CONFIG_XML}
- owner: root:wheel
- path: /cf/conf/config.xml
- permissions: '0644'
- config-xml.tmpl: |
-
-
- 21.5
-
-
- normal
- gateway
- edison
-
- all
-
- system
- 1998
-
-
- admins
-
- system
- 1999
- 0
- page-all
-
-
- admin
-
- system
- admins
- $2y$10$K3ejlPZsxpqhd63EzyNgf.qK4RDhlSvW2ln.gIq7kbkVdlRiZ1XB2
- 0
- user-shell-access
- 2
- c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDdzR4OW43NEtsVnRqUnBBRUhSVWJldnI2dVBDa0NnYkpHSTBKTUFhdnJ3K2JlTmI1V0wyOE1nOFU0MzBiWnhJRWFHU05pSXQreDk2aGFPY0RjOEFaMjRDa0Uxb1ZUS2x6aExyQkJGc0hmaVpuMHRYK1dtZWIyaGZBcmxQRnk2ZXJaZHRVTTUrYUQ2VFBYZmR4M0VWT2FBRzNncTFhbS9lSzFBM1NPVTVjTnFZMExjcWpGVDc0dG03T0NNc1VFSkg4emlCKzQ3ZTh2TWtDZjVIcTJqdVd5RjU0NTNiYzRaRHBMcW1hSXZ0UGpyN3NyQllLczI2MWpxRFlOWEF1aytyTExoRUt4VmNuY1o1U1pDbDJyWGVIdlRnb3lqZE1IWjYvWTNKZXhuYmg0Q2dkUVlmUU9HQStyQ01mVy9MTmFBV2hCUUlWRzZ1QU5mSEhhMTZQVE1XQTQzdlhTdUFCQ0VRQmR5VVhMQi9WWFlmcklOWTJQTnNPOG1FenVWcFRSaEQrSnRpT3BFejVQTlNXZk9qSUpSYjBBSjVDR2pHb0pSczdrck5QKzZsUWlTL0k5TnV4OThURjRla2FsYkMrOG9wUGFnTEhuL2NxaG9sSm1qQXp6QzV1by9yY1VKcldUNEYvODNoYmpUdEhKT01KdEZXV2lZcURmUWVoOENGOUJoeGs9IHJvb3RAbG9jYWxob3N0
-
- pfSense.css
-
-
- user
- $2y$10$5i3toKDJLCdLS0w71m7TF.49vokuIneuA97Tq3cJYotKzeEbn0Lqu
-
- sysgwservice
-
- 2
-
-
- pfSense.css
- 2001
- user-shell-access
-
- 2000
- 2000
- ${NTP}
-
- http
-
- 602232b5962a3
- 2
- 28080
- 2
- pfSense.css
- 1e3f75;
-
-
- yes
-
-
-
- 400000
- hadp
- hadp
- hadp
-
- monthly
-
-
-
- enabled
- enabled
- 25812
-
- Etc/UTC
- 115200
- serial
-
-
-
-
- en_US
-
-
- ${DNS_1}
- ${DNS_2}
-
-
-
-
-
- vmx1
-
-
-
- 32
- ${IP_ADDR_WAN}
- ${SUBNET_WAN}
- WANGW
-
-
-
- vmx0
-
-
- ${IP_ADDR_LAN}
- ${SUBNET_LAN}
-
-
-
-
-
-
-
- ${DHCP_RANGE_START}
- ${DHCP_RANGE_END}
-
-
-
-
- 94670856
- 189341712
-
-
-
-
-
-
-
-
- hmac-md5
-
-
-
- allow
- gateway.edison
-
-
-
-
-
-
-
-
-
-
-
-
-
- ::1000
- ::2000
-
- assist
- medium
-
-
-
-
-
- public
-
-
-
-
-
- 1
-
-
-
- automatic
-
-
-
-
-
- 1626111739
- pass
- wan
- inet
-
-
-
-
-
-
-
-
-
- tcp
-
-
-
- 25812
-
-
-
-
- pass
- inet
-
- lan
- 0100000101
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- -
- 1,31
- 0-5
- *
- *
- *
- root
- /usr/bin/nice -n20 adjkerntz -a
-
- -
- 1
- 3
- 1
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_bogons.sh
-
- -
- 1
- 1
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.dyndns.update
-
- -
- */60
- *
- *
- *
- *
- root
- /usr/bin/nice -n20 /usr/local/sbin/expiretable -v -t 3600 virusprot
-
- -
- 30
- 12
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_urltables
-
- -
- 1
- 0
- *
- *
- *
- root
- /usr/bin/nice -n20 /etc/rc.update_pkg_metadata
-
- -
- */1
- *
- *
- *
- *
- root
- /usr/sbin/newsyslog
-
- -
- 1
- 3
- *
- *
- *
- root
- /etc/rc.periodic daily
-
- -
- 15
- 4
- *
- *
- 6
- root
- /etc/rc.periodic weekly
-
- -
- 30
- 5
- 1
- *
- *
- root
- /etc/rc.periodic monthly
-
-
-
-
-
-
-
- system_information:col1:show,netgate_services_and_support:col2:show,interfaces:col2:show
- 10
-
-
-
-
- all
- all
-
-
-
-
-
- 602232b5962a3
-
-
-
- transparent
-
-
-
-
-
-
-
-
- 602232b5962a3
-
- server
- LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVoRENDQTJ5Z0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREJhTVRnd05nWURWUVFLRXk5d1psTmwKYm5ObElIZGxZa052Ym1acFozVnlZWFJ2Y2lCVFpXeG1MVk5wWjI1bFpDQkRaWEowYVdacFkyRjBaVEVlTUJ3RwpBMVVFQXhNVmNHWlRaVzV6WlMwMk1ESXlNekppTlRrMk1tRXpNQjRYRFRJeE1ESXdPVEEyTlRrd01Wb1hEVEl5Ck1ETXhOREEyTlRrd01Wb3dXakU0TURZR0ExVUVDaE12Y0daVFpXNXpaU0IzWldKRGIyNW1hV2QxY21GMGIzSWcKVTJWc1ppMVRhV2R1WldRZ1EyVnlkR2xtYVdOaGRHVXhIakFjQmdOVkJBTVRGWEJtVTJWdWMyVXROakF5TWpNeQpZalU1TmpKaE16Q0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1ZblNtUG93OUt6Cml5b2ZVeW5ZaGN2YnN3YTVaVFVYTVVmZGdjaTlzamp2Ymw1eUhHZlhaY1BCdzRuUk14eUQ4enE0dEtuZ2EzSEkKNFNqbDJkamljdHM2c0loQ1FxbUg4bGpUWWVjRkw3WDlpRUdWaHpnU0tPcURzS3NUeGhaMEs0TkVwMlM1bUdTUgowZ0pWbTlBeTJDUzhVWUFjRFUrVmd5bVR5cVVUZW5CcEVyVzlWSEZmUnhxZTBUcFlmT2tXbjRSb3RxMnNrRGlhCnN4dWhWd2pMU3dNR2lqSnJ0aTM3UGQyRnd0UW1CRGY4TEI3NmhuY1BpMmVDWXE1Ly9EZnllZkRPOVVEZ0ZxL3QKNHp6d3JkT2FTcGJoVzVRV0RjWGtNQU96OGNxTTJZZVFxVVZGRGV0aFFPcnZrMjQyaXRMTWtzWmh5aHFOL21ITgpTN0JZN3F6QXVCY0NBd0VBQWFPQ0FWTXdnZ0ZQTUFrR0ExVWRFd1FDTUFBd0VRWUpZSVpJQVliNFFnRUJCQVFECkFnWkFNQXNHQTFVZER3UUVBd0lGb0RBekJnbGdoa2dCaHZoQ0FRMEVKaFlrVDNCbGJsTlRUQ0JIWlc1bGNtRjAKWldRZ1UyVnlkbVZ5SUVObGNuUnBabWxqWVhSbE1CMEdBMVVkRGdRV0JCUzY1eDhTQmJhY3FsN3gzL05UcEFqTgpPUlpaNmpDQmdnWURWUjBqQkhzd2VZQVV1dWNmRWdXMm5LcGU4ZC96VTZRSXpUa1dXZXFoWHFSY01Gb3hPREEyCkJnTlZCQW9UTDNCbVUyVnVjMlVnZDJWaVEyOXVabWxuZFhKaGRHOXlJRk5sYkdZdFUybG5ibVZrSUVObGNuUnAKWm1sallYUmxNUjR3SEFZRFZRUURFeFZ3WmxObGJuTmxMVFl3TWpJek1tSTFPVFl5WVRPQ0FRQXdKd1lEVlIwbApCQ0F3SGdZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGQlFnQ0FqQWdCZ05WSFJFRUdUQVhnaFZ3ClpsTmxibk5sTFRZd01qSXpNbUkxT1RZeVlUTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTVFoWi9JNy8vbVAKSVh0dHo0Y0V3MlQ5SjdlSlpGQStSUy9UcStPUVlMU2tSWTNTQVh0cElYWWRhTkdFYWxuVDM4ZmZ6VllFb2hLawpKTXpTUURkUkxYYmZRS2oxZi9ZM1B0NFlOeFg2Q2lFTUI4cWFINFlOZmphVk1pdVloaUIwKzZiaW9Ic1plR2tECitQYlEzTzZ3cUg3aWVlMEM0Q2FHUUhWVFphQ2dOelBFUU9WYmdJc245Wm1UZlBETHF4OG9RZkRybm1ETzFwV1kKWHowMDlWTmRyb3VWNlVEc0xlSTd2VjAwQ3RNa2NFSU1ZTlBaSHliYjZ4Y3EzRU1BQjlYdG9BeklLYXdGWDA3bwo1bkR1T3dqZ0d2SzdqcHp1bmxpNm54TVNlYlpocjdXOWQra1ZUQU1Qbk5zd2ZsY0hvZXo5NXFYVGZzSjA3MXVzCmZYLzloSWZ6cHlzPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
- LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREdKMHBqNk1QU3M0c3EKSDFNcDJJWEwyN01HdVdVMUZ6RkgzWUhJdmJJNDcyNWVjaHhuMTJYRHdjT0owVE1jZy9NNnVMU3A0R3R4eU9Fbwo1ZG5ZNG5MYk9yQ0lRa0twaC9KWTAySG5CUysxL1loQmxZYzRFaWpxZzdDckU4WVdkQ3VEUktka3VaaGtrZElDClZadlFNdGdrdkZHQUhBMVBsWU1wazhxbEUzcHdhUksxdlZSeFgwY2FudEU2V0h6cEZwK0VhTGF0ckpBNG1yTWIKb1ZjSXkwc0RCb295YTdZdCt6M2RoY0xVSmdRMy9Dd2Urb1ozRDR0bmdtS3VmL3czOG5ud3p2VkE0QmF2N2VNOAo4SzNUbWtxVzRWdVVGZzNGNURBRHMvSEtqTm1Ia0tsRlJRM3JZVURxNzVOdU5vclN6SkxHWWNvYWpmNWh6VXV3CldPNnN3TGdYQWdNQkFBRUNnZ0VBU1BrWlBUZndQYjBmSi9FSFNHYlR4eUp1aUxCbjVDaFpYclRsWWM3eVBtTjIKaXRPTlFIU2pwbmJLRXd2MnZhNmVPV2JpS1psYkdEazVLajJUZjNyTmY0emFqQ09TdHlvNGhwdTU5MEVDdEF2ZApUUUhMSFV3YXcxcWlDaTZyQ1lQWXJtdnZGWG4vOURGN1hKNjRWZkVUaGx2UHk5VUkxZUVpdjRJTTQ4UUxtQytECjZ2dlNRUXQzc0dxVXJjTlFPejRROUQ1RmJMVThYWFFqZEhFVThWY1oyQlBSWU1tZHRwS0FVUUo2M3MxVDliR3kKaUhMWFdRb1BFWkpZN3lLWERjYmQ2T20zKzB4VXhEVFlmdTExdDk5dHBLaWFmM0plcFVHTEMvN0pwZ2hjN21seApmUWVhSWtoTGphSGZJalNBUWV6UTJqd1hmWGJib1AvNXp2dENhK0Q1QVFLQmdRRGtvYVhHZHBNNC9yUFgyTThaCk1wSlhBZEhvRE5CRDVWVXlqTnphRnBvTFFZa1lDWHdESHgvZnR5VDJHcnJibmdScEdzdndzWEVnS09NK3BGOGoKeVI0OG9LU1A3b1lpMFp5SUpKVERJZ2djczltMXRUVk1xd0NLNGVqTmNzZnV4ZGhma255U2l2bm45N0VnaFUrOQpQREpCamdRZys2NytIdEtBQmRzNG9mSnVnUUtCZ1FEZDM2ZGNvSEJoc1liYWlEM2cyVFdHTDhHeDNNWEI2dlQyCjFJamlISFg0U0xEQi9nMWlGUW5jQkkzM01SV3Q1ajhVc3dZWnE5bWdSZDlXNy95eDNLcFlYUU8rdDZKakcrVkQKVm43T2xPQ2s5Y3FCN0Vhbm9ZaFRmcXVSbGhYUkoxK1gyem9ITmJ6Zm0xbU5YNWtKeWdPZ1BlYy9PWmpZWnV1ZQpkWnFVUHpDS2x3S0JnUUNkVmRCckh6WjVSbTRrNEFMRTZGUmtvOC83ekxXVnc4QTVwVTVtbHA4emw4bnNUTHFXCnUxaUg3OVR6dDdsWFlRclArYXpXMm9WckNEZzU1NHZCYUtueU51cUlVakRrelRMWFRiVkNBNjFwY3lzakxLeWQKTWlDUmNwMXVwTXFKdW9VSVlCcHlEa21rSklRMFNRbHl4SFloUHNZQW5MTDBGZW51U0Jsd2c2SCtBUUtCZ0FxWgprOHBXRStobTdkVms2dWZpR0srSGVvNS95S2JpRGIvNUdXME1CUXJPbGpzK1l1OUxxQVB0NThabWZ1Mm04VjZRCnl2clEvZjJ0T09WVFk5UXZnYWZsRWJlNzJETTU4dk1MbjlWSXdZNi8rMlBIb1B5RE9Cc2RFeDlLZDFKZHVqQnMKRmRLZ1c1eW5ORmtQdHlVRGxMYkVIbGNqN2t4d1psTlV0UVRlRy9pVEFvR0FHb1d5QmJBeWcxbjhWcVBZMm9oTgpnR0FNRm13bzBtQjhWZXN2TFh4TzZubFlKMUI5RWRHVDNvR0FFSzFkc084ZzR1WXMwMkFQYWxEUnFoaUIzV2JGCnVPQ1dxRDljZ2Z6TGdWaXdkOHVLTXFhSHNOTjRWeXpIZ0UyYWY3QnlLa2czQWRBUXFDNk56ZXVvRzRhYm14OWcKTzRwVWFqZWdBd3Nic2lUektmYVpUNEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
-
-
-
- API
- api
-
- https://github.com/jaredhendrickson13/pfsense-api
- System
- %%PKGVERSION%%
- api.xml
- jaredhendrickson13@gmail.com
-
-
-
- lan
- jwt
- json
- e568b85aed939826ead0199aa4238e399c9e12806f4dc655f8ee4a74818a19a2
- 3600
- sha256
- 16
-
-
-
-
-
- /api/
-
-
-
-
- /api/v1/
-
-
-
-
-
-
-
-
- WANGW
-
- wan
- ${IP_GATEWAY_WAN}
- WANGW
-
- inet
-
-
-
-
- 5
- enabled
-
-
- auto
- lan
- yes
- yes
-
-
-
-
-
-
-
-
-
- edison
- !
-
-
-
-
- kind: ConfigMap
- metadata:
- name: pfsense-vm-files
-
- ---
- apiVersion: v1
- data:
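- # decodes to: CREDS: FROM_SECRET_REF (a placeholder value)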
- credentials: Q1JFRFM6IEZST01fU0VDUkVUX1JFRg==
- kind: Secret
- metadata:
- name: account-creds
- type: Opaque
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Provider
- metadata:
- name: vsphere-custom
- spec:
- provider: custom
- credentials:
- source: Secret
- secretRef:
- name: account-creds
- key: credentials
- ---
- apiVersion: terraform.core.oam.dev/v1beta1
- kind: Configuration
- metadata:
- name: pfsense-gateway-vm-config
- namespace: jet-system
- spec:
- providerRef:
- name: vsphere-custom
- namespace: jet-system
- volumeSpec:
- volumeMounts:
- - name: spectro-directory
- mountPath: /opt/spectrocloud
- - mountPath: "/var/files"
- name: files-vol
- volumes:
- - name: files-vol
- configMap:
- name: pfsense-vm-files
- - name: spectro-directory
- hostPath:
- path: /opt/spectrocloud
- type: Directory
- deleteResource: true
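- # Inject the vSphere endpoint and credentials from the vsphere-cloud-account Secret as TF_VAR_* environment variables for the HCL below.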
- variableRef:
- - name: TF_VAR_VSPHERE_HOST
- valueFrom:
- secretKeyRef:
- key: vcenterServer
- name: vsphere-cloud-account
- - name: TF_VAR_USERNAME
- valueFrom:
- secretKeyRef:
- key: username
- name: vsphere-cloud-account
- - name: TF_VAR_PASSWORD
- valueFrom:
- secretKeyRef:
- key: password
- name: vsphere-cloud-account
- - name: TF_VAR_INSECURE
- valueFrom:
- secretKeyRef:
- key: insecure
- name: vsphere-cloud-account
- variable:
- VM_NAME: "pfsense-gateway-vm"
- NTP: FROM_SECRET_REF
- DNS_1: FROM_SECRET_REF
- DNS_2: FROM_SECRET_REF
- IP_ADDR_WAN: FROM_SECRET_REF
- IP_GATEWAY_WAN: FROM_SECRET_REF
- SUBNET_WAN: FROM_SECRET_REF
- IP_ADDR_LAN: FROM_SECRET_REF
- SUBNET_LAN: FROM_SECRET_REF
- DHCP_RANGE_START: FROM_SECRET_REF
- DHCP_RANGE_END: FROM_SECRET_REF
- DATACENTER: FROM_SECRET_REF
- DATASTORE: FROM_SECRET_REF
- RESOURCE_POOL: FROM_SECRET_REF
- WAN_NETWORK: FROM_SECRET_REF
- LAN_NETWORK: FROM_SECRET_REF
- VM_TEMPLATE_NAME: FROM_SECRET_REF
- FOLDER: FROM_SECRET_REF
- hcl: |
- provider "vsphere" {
- user = var.USERNAME
- password = var.PASSWORD
- vsphere_server = var.VSPHERE_HOST
-
- allow_unverified_ssl = tobool(var.INSECURE)
- }
-
- variable "USERNAME" {
- type = string
- }
-
- variable "PASSWORD" {
- type = string
- }
-
- variable "VSPHERE_HOST" {
- type = string
- }
-
- variable "INSECURE" {
- type = string
- default = "true"
- }
-
- variable "NTP" {
- type = string
- }
-
- variable "DNS_1" {
- type = string
- }
-
- variable "DNS_2" {
- type = string
- }
-
- variable "IP_ADDR_WAN" {
- type = string
- }
-
- variable "SUBNET_WAN" {
- type = string
- }
-
- variable "IP_GATEWAY_WAN" {
- type = string
- }
-
- variable "IP_ADDR_LAN" {
- type = string
- }
-
- variable "SUBNET_LAN" {
- type = string
- }
-
- variable "DHCP_RANGE_START" {
- type = string
- }
-
- variable "DHCP_RANGE_END" {
- type = string
- }
-
- variable "DATACENTER" {
- type = string
- }
-
- variable "DATASTORE" {
- type = string
- }
-
- variable "RESOURCE_POOL" {
- type = string
- }
-
- variable "WAN_NETWORK" {
- type = string
- }
-
- variable "LAN_NETWORK" {
- type = string
- }
-
- variable "VM_TEMPLATE_NAME" {
- type = string
- }
-
- variable "VM_NAME" {
- type = string
- }
-
- variable "FOLDER" {
- type = string
- }
-
- ##### OUTPUT #####
- output "config_value" {
- value = data.template_file.config.rendered
- }
-
- output "user_data_value" {
- value = data.template_file.user_data.rendered
- }
-
- ##### PROVIDER #####
- data "template_file" "config" {
- template = file("/var/files/config-xml.tmpl")
- vars = {
- NTP = var.NTP
- DNS_1 = var.DNS_1
- DNS_2 = var.DNS_2
- IP_ADDR_WAN = var.IP_ADDR_WAN
- SUBNET_WAN = var.SUBNET_WAN
- IP_GATEWAY_WAN = var.IP_GATEWAY_WAN
- IP_ADDR_LAN = var.IP_ADDR_LAN
- SUBNET_LAN = var.SUBNET_LAN
- DHCP_RANGE_START = var.DHCP_RANGE_START
- DHCP_RANGE_END = var.DHCP_RANGE_END
- }
- }
-
- data "template_file" "user_data" {
- template = file("/var/files/user-data.tmpl")
- vars = {
- CONFIG_XML = base64encode(data.template_file.config.rendered)
- }
- }
-
- data "vsphere_datacenter" "dc" {
- name = var.DATACENTER
- }
-
- data "vsphere_datastore" "datastore" {
- name = var.DATASTORE
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_resource_pool" "pool" {
- name = var.RESOURCE_POOL
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_network" "wan_network" {
- name = var.WAN_NETWORK
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_network" "lan_network" {
- name = var.LAN_NETWORK
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- data "vsphere_virtual_machine" "template" {
- name = var.VM_TEMPLATE_NAME
- datacenter_id = data.vsphere_datacenter.dc.id
- }
-
- resource "vsphere_virtual_machine" "vm" {
- name = var.VM_NAME
- resource_pool_id = data.vsphere_resource_pool.pool.id
- datastore_id = data.vsphere_datastore.datastore.id
- folder = var.FOLDER
-
- wait_for_guest_net_timeout = 0
-
- num_cpus = 2
- memory = 4096
- guest_id = "freebsd12_64Guest"
- scsi_type = "lsilogic"
-
- network_interface {
- network_id = data.vsphere_network.lan_network.id
- }
-
- network_interface {
- network_id = data.vsphere_network.wan_network.id
- }
-
- cdrom {
- client_device = true
- }
-
- disk {
- label = var.VM_TEMPLATE_NAME
- size = data.vsphere_virtual_machine.template.disks.0.size
- eagerly_scrub = data.vsphere_virtual_machine.template.disks.0.eagerly_scrub
- thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned
- }
-
- clone {
- template_uuid = data.vsphere_virtual_machine.template.id
- }
-
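- # Pass the cloud-init payload to the guest through VMware guestinfo keys.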
- extra_config = {
- "guestinfo.userdata" = base64encode(data.template_file.user_data.rendered)
- "guestinfo.userdata.encoding" = "base64"
- "guestinfo.metadata" = <<-EOT
- {
- "instance-id": ${var.VM_NAME}
- }
- EOT
- }
- }
diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf b/examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf
deleted file mode 100644
index 073226b..0000000
--- a/examples/local-simplified-yaml/project-vsphere-with-creds-201/main.tf
+++ /dev/null
@@ -1,72 +0,0 @@
-terraform {
- required_version = ">= 0.14.0"
-
- required_providers {
- spectrocloud = {
- version = "= 0.6.10-pre"
- source = "spectrocloud/spectrocloud"
- }
- }
-}
-
-variable "sc_host" {}
-variable "sc_api_key" {
- sensitive = true
-}
-variable "sc_project_name" {}
-
-provider "spectrocloud" {
- host = var.sc_host
- api_key = var.sc_api_key
- project_name = var.sc_project_name
-}
-
-locals {
- profiles = {
- for k in fileset("config/profile", "profile-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}"))
- }
-
- appliances = {
- for k in fileset("config/appliance", "appliance-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}"))
- }
-
- clusters = {
- for k in fileset("config/cluster", "cluster-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}"))
- }
-}
-
-module "SpectroSystemProfile" {
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- profiles = local.profiles
-}
-
-module "SpectroAppliance" {
- depends_on = [module.SpectroSystemProfile]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- appliances = local.appliances
-}
-
-module "SpectroCluster" {
- depends_on = [module.SpectroAppliance]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
- //source = "/Users/rishi/work/git_clones/terraform-spectrocloud-modules"
-
- # It is recommended to use the latest released version of the module instead of the latest from GitHub
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- clusters = local.clusters
-}
diff --git a/examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars b/examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars
deleted file mode 100644
index 3c4423e..0000000
--- a/examples/local-simplified-yaml/project-vsphere-with-creds-201/terraform.template.tfvars
+++ /dev/null
@@ -1,4 +0,0 @@
-# Credentials
-sc_host = "{enter Spectro Cloud host, blank for SaaS}"
-sc_api_key = "{enter Spectro Cloud API Key}"
-sc_project_name = "{enter Spectro Cloud Project Name}"
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md b/examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md
deleted file mode 100644
index 8502089..0000000
--- a/examples/local-simplified-yaml/project-vsphere-without-creds-202/README.md
+++ /dev/null
@@ -1 +0,0 @@
-vSphere without creds appliance project
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml
deleted file mode 100644
index 9627e8e..0000000
--- a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/appliance/appliance-hospital-202.yaml
+++ /dev/null
@@ -1 +0,0 @@
-id: "hospital-202"
diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml
deleted file mode 100644
index b1d843a..0000000
--- a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/cluster/cluster-hospital-202.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: hospital-202
-tags:
-- "skip_completion"
-cloudType: edge
-profiles:
- infra:
- name: without-creds-infra
- system:
- name: hospital-202-system-profile
- addons:
- - name: spectro-core
-cloud_config:
- ssh_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCr3hE9IS5UUDPqNOiEWVJvVDS0v57QKjb1o9ubvvATQNg2T3x+inckfzfgX3et1H9X1oSp0FqY1+Mmy5nfTtTyIj5Get1cntcC4QqdZV8Op1tmpI01hYMj4lLn55WNaXgDt+35tJ47kWRr5RqTGV05MPNWN3klaVsePsqa+MgCjnLfCBiOz1tpBOgxqPNqtQPXh+/T/Ul6ZDUW/rySr9iNR9uGd04tYzD7wdTdvmZSRgWEre//IipNzMnnZC7El5KJCQn8ksF+DYY9eT9NtNFEMALTZC6hn8BnMc14zqxoJP/GNHftmig8TJC500Uofdr4OKTCRr1JwHS79Cx9LyZdAp/1D8mL6bIMyGOTPVQ8xUpmEYj77m1kdiCHCk22YtLyfUWuQ0SC+2p1soDoNfJUpmxcKboOTZsLq1HDCFrqSyLUWS1PrYZ/MzhsPrsDewB1iHLbYDt87r2odJOpxMO1vNWMOYontODdr5JPKBpCcd/noNyOy/m4Spntytfb/J3kM1oz3dpPfN0xXmC19uR1xHklmbtg1j784IMu7umI2ZCpUwLADAodkbxmbacdkp5I+1NFgrFamvnTjjQAvRexV31m4m9GielKFQ4tCCId2yagMBWRFn5taEhb3SKnRxBcAzaJLopUyErOtqxvSywGvb53v4MEShqBaQSUv4gHfw== spectro2022"
-node_groups:
- - name: master-pool
- control_plane: true
- count: 1
- placements:
- - appliance: "hospital-202"
-
- - name: worker-pool
- count: 3
- placements:
- - appliance: "hospital-202"
-
-
diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml b/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml
deleted file mode 100644
index 985a8fa..0000000
--- a/examples/local-simplified-yaml/project-vsphere-without-creds-202/config/profile/profile-system.yaml
+++ /dev/null
@@ -1,321 +0,0 @@
-name: hospital-202-system-profile
-description: system-profile
-type: system
-cloudType: all
-packs:
- - name: "fluentbit"
- registry: Public Repo
- type: "spectro"
- layer: "addon"
- version: "1.3.5"
- values: |
- pack:
- #The namespace (on the target cluster) to install this chart
- #When not found, a new namespace will be created
- namespace: "fluent-bit"
- content:
- images:
- - image: docker.io/fluent/fluent-bit:1.3.5
-
- charts:
- fluent-bit:
-
- fullnameOverride: "fluent-bit"
- nameOverride: ""
- on_minikube: false
-
- image:
- fluent_bit:
- repository: fluent/fluent-bit
- tag: 1.3.5
- pullPolicy: Always
-
- # When enabled, exposes json and prometheus metrics on {{ .Release.Name }}-metrics service
- metrics:
- enabled: false
- service:
- # labels:
- # key: value
- annotations: {}
- # In order for Prometheus to consume metrics automatically use the following annotations:
- # prometheus.io/path: "/api/v1/metrics/prometheus"
- # prometheus.io/port: "2020"
- # prometheus.io/scrape: "true"
- port: 2020
- type: ClusterIP
- serviceMonitor:
- enabled: false
- additionalLabels: {}
- # namespace: monitoring
- # interval: 30s
- # scrapeTimeout: 10s
-
- # When enabled, fluent-bit will keep track of tailing offsets across pod restarts.
- trackOffsets: false
-
- ## PriorityClassName
- ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
- priorityClassName: ""
-
- backend:
- type: es
- forward:
- host: fluentd
- port: 24284
- tls: "off"
- tls_verify: "on"
- tls_debug: 1
- shared_key:
- es:
- host: elasticsearch-client
- port: 9200
- # Elastic Index Name
- index: fluent-bit
- type: flb_type
- logstash_prefix: fluent-bit
- replace_dots: "On"
- logstash_format: "On"
- retry_limit: "False"
- time_key: "@timestamp"
- # Optional username credential for Elastic X-Pack access
- http_user:
- # Password for user defined in HTTP_User
- http_passwd:
- # Optional TLS encryption to ElasticSearch instance
- tls: "off"
- tls_verify: "on"
- # TLS certificate for the Elastic (in PEM format). Use if tls=on and tls_verify=on.
- tls_ca: ""
- # TLS debugging levels = 1-4
- tls_debug: 1
- splunk:
- host: 127.0.0.1
- port: 8088
- token: ""
- send_raw: "on"
- tls: "on"
- tls_verify: "off"
- tls_debug: 1
- message_key: "kubernetes"
- stackdriver: {}
-
- ##
- ## Ref: http://fluentbit.io/documentation/current/output/http.html
- ##
- http:
- host: 127.0.0.1
- port: 80
- uri: "/"
- http_user:
- http_passwd:
- tls: "off"
- tls_verify: "on"
- tls_debug: 1
- ## Specify the data format to be used in the HTTP request body
- ## Can be either 'msgpack' or 'json'
- format: msgpack
- headers: []
-
- parsers:
- enabled: false
- ## List the respective parsers in key: value format per entry
- ## Regex required fields are name and regex. JSON and Logfmt required field
- ## is name.
- regex: []
- logfmt: []
- ## json parser config can be defined by providing an extraEntries field.
- ## The following entry:
- ## json:
- ## - extraEntries: |
- ## Decode_Field_As escaped log do_next
- ## Decode_Field_As json log
- ##
- ## translates into
- ##
- ## Command | Decoder | Field | Optional Action |
- ## ==============|===========|=======|===================|
- ## Decode_Field_As escaped log do_next
- ## Decode_Field_As json log
- ##
- json: []
-
- env: []
-
- ## Annotations to add to the DaemonSet's Pods
- podAnnotations: {}
-
- ## By default several 'files' are provided in the config
- ## (fluent-bit.conf, custom_parsers.conf). Because they are mounted
- ## with subPath, the ConfigMap cannot simply be changed in place. If this
- ## variable is set, the user is assumed to have provided, in
- ## 'existingConfigMap', the entire fluent-bit config (etc/*), including
- ## parsers and system config. In that case, no subPath is used.
- fullConfigMap: false
-
- ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.existingConfigMap}}
- ## Defining existingConfigMap will cause templates/config.yaml
- ## to NOT generate a ConfigMap resource
- ##
- existingConfigMap: ""
-
-
- # NOTE: If you want to add extra sections, add them here, in between the includes,
- # wherever they need to go. Section order matters.
-
- rawConfig: |-
- @INCLUDE fluent-bit-service.conf
- @INCLUDE fluent-bit-input.conf
- @INCLUDE fluent-bit-filter.conf
- @INCLUDE fluent-bit-output.conf
-
-
- # WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # This is to add extra entries to an existing section, NOT for adding new sections.
- # Do not file bugs about the indentation being wrong. Add your new sections to
- # rawConfig instead.
- #
- extraEntries:
- input: |-
- # # >=1 additional Key/Value entries for the existing Input section
- audit: |-
- # # >=1 additional Key/Value entries for the existing audit Input section
- filter: |-
- # # >=1 additional Key/Value entries for the existing Filter section
- output: |-
- # # >=1 additional Key/Value entries for the existing Output section
- # WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
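- # For example, extra keys for the existing Filter section could look like
- # this (a sketch; Annotations and Labels are options of fluent-bit's
- # kubernetes filter):
- # filter: |-
- #   Annotations Off
- #   Labels      On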
-
-
- ## Extra ports to add to the daemonset ports section
- extraPorts: []
-
- ## Extra volumes containing additional files required for fluent-bit to work
- ## (eg. CA certificates)
- ## Ref: https://kubernetes.io/docs/concepts/storage/volumes/
- ##
- extraVolumes: []
-
- ## Extra volume mounts for the fluent-bit pod.
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/
- ##
- extraVolumeMounts: []
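- ## For example, to mount a CA bundle from a Secret (a sketch; the volume
- ## and Secret names are hypothetical):
- # extraVolumes:
- #   - name: es-ca
- #     secret:
- #       secretName: es-ca-cert
- # extraVolumeMounts:
- #   - name: es-ca
- #     mountPath: /certs
- #     readOnly: true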
-
- resources:
- limits:
- cpu: 200m
- memory: 128Mi
- requests:
- cpu: 10m
- memory: 8Mi
-
- # When enabled, pods will bind to the node's network namespace.
- hostNetwork: false
-
- # Which DNS policy to use for the pod.
- # Consider switching to 'ClusterFirstWithHostNet' when 'hostNetwork' is enabled.
- dnsPolicy: ClusterFirst
-
- ## Node tolerations for fluent-bit scheduling to nodes with taints
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- ##
- # Fluent bit is configured to run on all master and worker nodes
- tolerations:
- - key: node-role.kubernetes.io/master
- operator: Exists
- effect: NoSchedule
- - operator: "Exists"
- effect: "NoExecute"
- - operator: "Exists"
- effect: "NoSchedule"
-
- ## Node labels for fluent-bit pod assignment
- ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
- ##
- nodeSelector: {}
- affinity: {}
-
- service:
- flush: 1
- logLevel: info
-
- input:
- tail:
- memBufLimit: 5MB
- parser: docker
- path: /var/log/containers/*.log
- ignore_older: ""
- systemd:
- enabled: false
- filters:
- systemdUnit:
- - docker.service
- - kubelet.service
- - node-problem-detector.service
- maxEntries: 1000
- readFromTail: true
- stripUnderscores: false
- tag: host.*
-
- audit:
- enable: false
- input:
- memBufLimit: 35MB
- parser: docker
- path: /var/log/kube-apiserver-audit.log
- bufferChunkSize: 2MB
- bufferMaxSize: 10MB
- skipLongLines: On
- key: kubernetes-audit
-
- filter:
- kubeURL: https://kubernetes.default.svc:443
- kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
- kubeTag: kube
- kubeTagPrefix: kube.var.log.containers.
-
- # If true, check whether the log field content is a JSON string map; if so,
- # append the map fields as part of the log structure.
- mergeJSONLog: true
-
- # If set, all unpacked keys from mergeJSONLog (Merge_Log) will be packed under
- # the key name specified on mergeLogKey (Merge_Log_Key)
- mergeLogKey: ""
-
- # If true, honor the pod annotation
- # fluentbit.io/parser: parser_name. parser_name must be the name
- # of a parser defined in parsers.conf.
- enableParser: true
-
- # If true, honor the pod annotation
- # fluentbit.io/exclude: true. If present, logs from that pod are discarded.
- enableExclude: true
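- # For example, a pod whose logs should be dropped would carry the
- # annotation below in its metadata (following the convention above):
- #   annotations:
- #     fluentbit.io/exclude: "true"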
-
- rbac:
- # Specifies whether RBAC resources should be created
- create: true
- # Specifies whether a PodSecurityPolicy should be created
- pspEnabled: false
-
- taildb:
- directory: /var/lib/fluent-bit
-
- serviceAccount:
- # Specifies whether a ServiceAccount should be created
- create: true
- # The name of the ServiceAccount to use.
- # If not set and create is true, a name is generated using the fullname template
- name:
-
- ## Specifies security settings for a container
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
- securityContext: {}
- # securityContext:
- # privileged: true
-
- ## Specifies security settings for a pod
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
- podSecurityContext: {}
- # podSecurityContext:
- # runAsUser: 1000
\ No newline at end of file
diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf b/examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf
deleted file mode 100644
index 073226b..0000000
--- a/examples/local-simplified-yaml/project-vsphere-without-creds-202/main.tf
+++ /dev/null
@@ -1,72 +0,0 @@
-terraform {
- required_version = ">= 0.14.0"
-
- required_providers {
- spectrocloud = {
- version = "= 0.6.10-pre"
- source = "spectrocloud/spectrocloud"
- }
- }
-}
-
-variable "sc_host" {}
-variable "sc_api_key" {
- sensitive = true
-}
-variable "sc_project_name" {}
-
-provider "spectrocloud" {
- host = var.sc_host
- api_key = var.sc_api_key
- project_name = var.sc_project_name
-}
-
-locals {
- profiles = {
- for k in fileset("config/profile", "profile-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/profile/${k}"))
- }
-
- appliances = {
- for k in fileset("config/appliance", "appliance-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/appliance/${k}"))
- }
-
- clusters = {
- for k in fileset("config/cluster", "cluster-*.yaml") :
- trimsuffix(k, ".yaml") => yamldecode(file("config/cluster/${k}"))
- }
-}
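-
-# As a sketch, a file config/cluster/cluster-one.yaml would land in
-# local.clusters under the key "cluster-one", with its decoded YAML map as
-# the value ("cluster-one" is a hypothetical file name):
-#
-#   clusters = {
-#     "cluster-one" = { ...decoded YAML... }
-#   }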
-
-module "SpectroSystemProfile" {
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use a pinned, released version of the module
- # instead of tracking the latest from github:
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- profiles = local.profiles
-}
-
-module "SpectroAppliance" {
- depends_on = [module.SpectroSystemProfile]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
-
- # It is recommended to use a pinned, released version of the module
- # instead of tracking the latest from github:
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- appliances = local.appliances
-}
-
-module "SpectroCluster" {
- depends_on = [module.SpectroAppliance]
- source = "github.com/spectrocloud/terraform-spectrocloud-modules"
- //source = "/Users/rishi/work/git_clones/terraform-spectrocloud-modules"
-
- # It is recommended to use a pinned, released version of the module
- # instead of tracking the latest from github:
- #source = "spectrocloud/modules/spectrocloud"
- #version = "0.0.7"
-
- clusters = local.clusters
-}
diff --git a/examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars b/examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars
deleted file mode 100644
index 3c4423e..0000000
--- a/examples/local-simplified-yaml/project-vsphere-without-creds-202/terraform.template.tfvars
+++ /dev/null
@@ -1,4 +0,0 @@
-# Credentials
-sc_host = "{enter Spectro Cloud host, blank for SaaS}"
-sc_api_key = "{enter Spectro Cloud API Key}"
-sc_project_name = "{enter Spectro Cloud Project Name}"
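-
-# For example (placeholder values only; do not commit real credentials):
-# sc_host         = "api.spectrocloud.com"
-# sc_api_key      = "xxxxxxxxxxxxxxxxxxxx"
-# sc_project_name = "Default"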
\ No newline at end of file