diff --git a/.github/workflows/readme.yaml b/.github/workflows/readme.yaml
index 66fcc56..b565bf4 100644
--- a/.github/workflows/readme.yaml
+++ b/.github/workflows/readme.yaml
@@ -1,6 +1,6 @@
 # This Github Workflow will run on every push to the repository
 # and will test and validate the Kargo codebase using Kind Kubernetes.
-name: CI Testing - Konductor on Talos
+name: Runme Kargo Quickstart CI
 
 on:
   push
diff --git a/docs/hack/talos-br0.vmpool.yaml b/docs/hack/talos-br0.vmpool.yaml
index 3fe6976..b2c77cc 100644
--- a/docs/hack/talos-br0.vmpool.yaml
+++ b/docs/hack/talos-br0.vmpool.yaml
@@ -1,61 +1,77 @@
----
 apiVersion: pool.kubevirt.io/v1alpha1
 kind: VirtualMachinePool
 metadata:
   name: talos
 spec:
-  replicas: 3
+  replicas: 0
   selector:
     matchLabels:
       kubevirt.io/vmpool: talos
   virtualMachineTemplate:
     metadata:
-      creationTimestamp: null
       labels:
         kubevirt.io/vmpool: talos
     spec:
       running: true
       template:
         metadata:
-          creationTimestamp: null
           labels:
             kubevirt.io/vmpool: talos
        spec:
-          architecture: amd64
+          networks:
+            - name: eth0
+              multus:
+                networkName: br0
           domain:
+            cpu:
+              cores: 1
+            resources:
+              requests:
+                memory: 2Gi
             devices:
-              disks:
-                - disk:
-                    bus: virtio
-                  bootOrder: 1
-                  name: talosdisk
               interfaces:
                 - name: eth0
                   bridge: {}
-            resources:
-              cpu:
-                cores: 2
-              requests:
-                memory: 4Gi
-          networks:
-            - name: eth0
-              multus:
-                networkName: br0
-          terminationGracePeriodSeconds: 0
+              disks:
+                - name: talos-root-disk
+                  bootOrder: 1
+                  disk:
+                    bus: virtio
+                - name: talos-empty-disk
+                  disk:
+                    bus: virtio
           volumes:
-            - dataVolume:
-                name: talosdisk
-              name: talosdisk
+            - name: talos-root-disk
+              dataVolume:
+                name: talos-root-dv
+            - name: talos-empty-disk
+              dataVolume:
+                name: talos-empty-dv
+            # - name: talos-root-disk
+            #   containerDisk:
+            #     image: docker.io/containercraft/talos:latest
+            #     imagePullPolicy: Always
       dataVolumeTemplates:
         - metadata:
-            name: talosdisk
+            name: talos-root-dv
+          spec:
+            storage:
+              accessModes:
+                - ReadWriteOnce
+              resources:
+                requests:
+                  storage: 32Gi
+            source:
+              registry:
+                url: "docker://docker.io/containercraft/talos:1.7.6"
+        - metadata:
+            name: talos-empty-dv
           spec:
             storage:
               accessModes:
                 - ReadWriteOnce
               resources:
                 requests:
-                  storage: 20Gi
+                  storage: 4Gi
             source:
-              http:
-                url: "https://factory.talos.dev/image/79c3d43dd90861d1ffdd379da3e5a6e7fccdb36be434d4b3749ecae8882b08a9/v1.7.6/nocloud-amd64.raw.xz"
+              blank: {}
diff --git a/docs/hack/talos-datavolume.yaml b/docs/hack/talos-datavolume.yaml
new file mode 100644
index 0000000..80f4c7f
--- /dev/null
+++ b/docs/hack/talos-datavolume.yaml
@@ -0,0 +1,42 @@
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: talos-disk-image-1.7.6
+  namespace: default
+spec:
+  source:
+    http:
+      url: "https://github.com/siderolabs/talos/releases/download/v1.7.6/nocloud-amd64.raw.xz"
+    #registry:
+    #  url: "docker://docker.io/containercraft/talos:latest"
+  pvc:
+    storageClassName: "ssd"
+    volumeMode: Filesystem
+    resources:
+      requests:
+        storage: 16Gi
+    accessModes:
+      - ReadWriteOnce
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: ubuntu-dv
+  namespace: default
+  annotations:
+    cdi.kubevirt.io/storage.usePopulator: "true"
+spec:
+  pvc:
+    storageClassName: ssd
+    volumeMode: Filesystem
+    resources:
+      requests:
+        storage: 16Gi
+    accessModes:
+      - ReadWriteOnce
+  source:
+    registry:
+      url: docker://docker.io/containercraft/ubuntu:22.04
+#    http:
+#      url: "https://github.com/siderolabs/talos/releases/download/v1.7.6/nocloud-amd64.raw.xz"
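For anyone wiring these manifests into the Pulumi program instead of applying them by hand, the same CDI import can be expressed with the `CustomResource` pattern the repo already uses. A minimal sketch, assuming a `k8s_provider` built elsewhere from the stack's kubeconfig (the Pulumi resource name here is illustrative):

```python
import pulumi
import pulumi_kubernetes as k8s

# Illustrative provider; Kargo normally constructs this from stack config.
k8s_provider = k8s.Provider("k8s")

# Pulumi equivalent of docs/hack/talos-datavolume.yaml: CDI downloads the
# Talos nocloud image over HTTP and imports it into a 16Gi "ssd" PVC.
talos_dv = k8s.apiextensions.CustomResource(
    "talos-disk-image",
    api_version="cdi.kubevirt.io/v1beta1",
    kind="DataVolume",
    metadata=k8s.meta.v1.ObjectMetaArgs(
        name="talos-disk-image-1.7.6",
        namespace="default",
    ),
    spec={
        "source": {
            "http": {
                "url": "https://github.com/siderolabs/talos/releases/download/v1.7.6/nocloud-amd64.raw.xz"
            }
        },
        "pvc": {
            "storageClassName": "ssd",
            "volumeMode": "Filesystem",
            "resources": {"requests": {"storage": "16Gi"}},
            "accessModes": ["ReadWriteOnce"],
        },
    },
    opts=pulumi.ResourceOptions(provider=k8s_provider),
)
```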
diff --git a/docs/hack/talos.vm.yaml b/docs/hack/talos.vm.yaml
new file mode 100644
index 0000000..63b7ad2
--- /dev/null
+++ b/docs/hack/talos.vm.yaml
@@ -0,0 +1,124 @@
+#---
+#apiVersion: kubevirt.io/v1
+#kind: VirtualMachine
+#metadata:
+#  name: talos
+#  namespace: default
+#spec:
+#  running: true
+#  template:
+#    spec:
+#      domain:
+#        clock:
+#          utc: {}
+#          timer:
+#            hpet:
+#              present: false
+#            pit:
+#              tickPolicy: delay
+#            rtc:
+#              tickPolicy: catchup
+#        cpu:
+#          cores: 1
+#          sockets: 1
+#          threads: 4
+#          dedicatedCpuPlacement: false
+#        machine:
+#          type: q35
+#        memory:
+#          guest: 8Gi
+#        features:
+#          acpi:
+#            enabled: true
+#          smm:
+#            enabled: true
+#        firmware:
+#          bootloader:
+#            efi:
+#              secureBoot: false
+#        devices:
+#          rng: {}
+#          autoattachSerialConsole: true
+#          autoattachGraphicsDevice: true
+#          networkInterfaceMultiqueue: false
+#          autoattachPodInterface: false
+#          disks:
+#            - name: vda
+#              bootOrder: 1
+#              disk:
+#                bus: virtio
+#          interfaces:
+#            - name: eth0
+#              model: virtio
+#              macAddress: 6a:7d:62:36:fc:a0
+#              bridge: {}
+#      networks:
+#        - name: eth0
+#          multus:
+#            networkName: br0
+#      volumes:
+#        - name: vda
+#          dataVolume:
+#            name: talos-disk-image-1.7.6
+#      terminationGracePeriodSeconds: 0
+---
+apiVersion: pool.kubevirt.io/v1alpha1
+kind: VirtualMachinePool
+metadata:
+  name: talos
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      kubevirt.io/vmpool: talos
+  virtualMachineTemplate:
+    metadata:
+      creationTimestamp: null
+      labels:
+        kubevirt.io/vmpool: talos
+    spec:
+      running: true
+      template:
+        metadata:
+          creationTimestamp: null
+          labels:
+            kubevirt.io/vmpool: talos
+        spec:
+          architecture: amd64
+          domain:
+            cpu:
+              cores: 2
+            devices:
+              disks:
+                - disk:
+                    bus: virtio
+                  bootOrder: 1
+                  name: talosdisk
+              interfaces:
+                - name: eth0
+                  bridge: {}
+            resources:
+              requests:
+                memory: 4Gi
+          networks:
+            - name: eth0
+              multus:
+                networkName: br0
+          terminationGracePeriodSeconds: 0
+          volumes:
+            - name: talosdisk
+              dataVolume:
+                name: talosrootdisk
+      dataVolumeTemplates:
+        - metadata:
+            name: talosrootdisk
+          spec:
+            storage:
+              accessModes:
+                - ReadWriteOnce
+              resources:
+                requests:
+                  storage: 20Gi
+            source:
+              registry:
+                url: "ghcr.io/containercraft/talos:latest"
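The hack manifest above pins `replicas: 1`; the Pulumi module introduced later in this diff derives that count from stack config instead, mapping `'single'` to 1 and `'ha'` to 3. A minimal sketch of that mapping, assuming the `kargo:talos` config object shown in `Pulumi.optiplexprime.yaml` at the end of this diff:

```python
import pulumi

config = pulumi.Config("kargo")
talos_cfg = config.get_object("talos") or {}

# Mirrors get_talos_config() in pulumi/src/vm/talos.py: 'single' -> 1, 'ha' -> 3.
replica_mode = talos_cfg.get("controlplane", {}).get("replicas", "single")
replicas = {"single": 1, "ha": 3}.get(replica_mode)
if replicas is None:
    raise ValueError(f"Invalid controlplane config: {replica_mode}")
```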
diff --git a/docs/metal/optiplexprime/README.md b/docs/metal/optiplexprime/README.md
index f29a387..b53b8e3 100644
--- a/docs/metal/optiplexprime/README.md
+++ b/docs/metal/optiplexprime/README.md
@@ -5,6 +5,7 @@ > Note: All commands run from the included Github Codespaces Devcontainer environment
 
 ### 1. Wipe all block device partitions and partition tables
+
 ### 2. Boot Omni Talos on the node(s)
 
 a. Download Talos ISO from Omni Dashboard Image Factory
@@ -27,26 +28,46 @@ d. Verify connection to Omni Console > Machines
 
 ### 3. Pulumi Login & Prep
 
-```bash
+1. Pulumi Login
+
+```bash {"id":"01J6MTD7H4YQQQ41E9FH168RV6"}
 # Login
 pulumi login
 
 # Init Pulumi ESC Environment for local config and env
-eval $(pulumi env open --format=shell kargo)
+eval $(pulumi env open --format=shell optiplexprime)
+
+# Create the organization and project stack
+pulumi stack select --create usrbinkat/kargo/optiplexprime
+
+# or select the stack
+pulumi stack select usrbinkat/kargo/optiplexprime
 ```
 
-### 4. Omnictl Login & Prep
+2. Omni CLI Login
 
-```bash
+```bash {"id":"01J6MTD7H4YQQQ41E9FMD739CK"}
 # Run command to login by following along with the prompts
 omnictl get machines
 ```
 
+3. Kubectl Login
+
+```bash {"id":"01J6MTD7H4YQQQ41E9FP8105X7"}
+kubectl get nodes -owide
+```
+
+4. Talosctl Login
+
+```bash {"id":"01J6MTD7H4YQQQ41E9FR05PQCT"}
+talosctl --nodes $(kubectl get nodes | awk '/talos-/{print $1}' | head -n1) dashboard
+```
+
 ### 5. Create Omni Talos Cluster
 
 a. Apply cluster template with omnictl
 
-```bash
+```bash {"id":"01J6MTD7H4YQQQ41E9FS4Y32HQ"}
 # Validate Cluster Template
 omnictl cluster template validate -f docs/metal/optiplexprime/cluster.yaml
 
@@ -59,71 +80,93 @@ omnictl cluster template status -f docs/metal/optiplexprime/cluster.yaml
 
 ![](.assets/image-4.png)
 
-2. Test Kubectl Access
-
-   * Download and add the `--skip-open-browser` flag to the kubeconfig oidc-login command arguments
-   * I added this Kubeconfig to my Pulumi ESC environment so it loads from the `eval $(pulumi env open --format=shell kargo)` command.
-
-   ```yaml
-   apiVersion: v1
-   kind: Config
-   clusters:
-     - cluster:
-         server: https://usrbinkat.kubernetes.omni.siderolabs.io
-       name: usrbinkat-optiplexprime
-   contexts:
-     - context:
-         cluster: usrbinkat-optiplexprime
-         namespace: default
-         user: usrbinkat-optiplexprime-kathryn.morgan@braincraft.io
-       name: usrbinkat-optiplexprime
-   current-context: usrbinkat-optiplexprime
-   users:
-     - name: usrbinkat-optiplexprime-kathryn.morgan@braincraft.io
-       user:
-         exec:
-           apiVersion: client.authentication.k8s.io/v1beta1
-           args:
-             - oidc-login
-             - get-token
-             - --oidc-issuer-url=https://usrbinkat.omni.siderolabs.io/oidc
-             - --oidc-client-id=native
-             - --oidc-extra-scope=cluster:optiplexprime
-             - --skip-open-browser
-           command: kubectl
-           env: null
-           provideClusterInfo: false
-   ```
-
-   * Also add TalosConfig to Pulumi ESC Environment
-
-   ```yaml
-   context: usrbinkat-optiplexprime
-   contexts:
-     usrbinkat-optiplexprime:
-       endpoints:
-         - https://usrbinkat.omni.siderolabs.io
-       auth:
-         siderov1:
-           identity: kathryn.morgan@braincraft.io
-           cluster: optiplexprime
-   ```
-
-```bash
+### 6. Configure Pulumi ESC
+
+* talosconfig
+* omniconfig
+* kubeconfig
+* Download and add the `--skip-open-browser` flag to the kubeconfig oidc-login command arguments
+
+```yaml {"id":"01J6QBPTVAZFXE26XEFYE0X1RF"}
+values:
+  sidero:
+    talosconfig: |
+      context: usrbinkat-optiplexprime
+      contexts:
+        usrbinkat-optiplexprime:
+          endpoints:
+            - https://usrbinkat.omni.siderolabs.io
+          auth:
+            siderov1:
+              identity: kathryn.morgan@braincraft.io
+              cluster: optiplexprime
+    omniconfig: |
+      context: default
+      contexts:
+        default:
+          url: https://usrbinkat.omni.siderolabs.io
+          auth:
+            siderov1:
+              identity: kathryn.morgan@braincraft.io
+  kubernetes:
+    kubeconfig: |
+      apiVersion: v1
+      kind: Config
+      clusters:
+        - cluster:
+            server: https://usrbinkat.kubernetes.omni.siderolabs.io
+          name: usrbinkat-optiplexprime
+      contexts:
+        - context:
+            cluster: usrbinkat-optiplexprime
+            namespace: default
+            user: usrbinkat-optiplexprime-kathryn.morgan@braincraft.io
+          name: usrbinkat-optiplexprime
+      current-context: usrbinkat-optiplexprime
+      users:
+        - name: usrbinkat-optiplexprime-kathryn.morgan@braincraft.io
+          user:
+            exec:
+              apiVersion: client.authentication.k8s.io/v1beta1
+              args:
+                - oidc-login
+                - get-token
+                - --oidc-issuer-url=https://usrbinkat.omni.siderolabs.io/oidc
+                - --oidc-client-id=native
+                - --oidc-extra-scope=cluster:optiplexprime
+                - --skip-open-browser
+              command: kubectl
+              env: null
+              provideClusterInfo: false
+  environmentVariables:
+    BROWSER: echo
+  pulumiConfig:
+    kubeconfig: ${kubernetes.kubeconfig}
+  files:
+    KUBECONFIG: ${kubernetes.kubeconfig}
+    OMNICONFIG: ${sidero.omniconfig}
+    TALOSCONFIG: ${sidero.talosconfig}
+```
+
+```bash {"id":"01J6MTD7H4YQQQ41E9G092KCPQ"}
 # Get Pods
 kubectl get pods -A
 ```
 
 ### 6. Deploy Kargo Platform
 
-```bash
+```bash {"id":"01J6MTD7H4YQQQ41E9G12YHVMJ"}
 # Create a new Pulumi stack & set kube context
-pulumi stack select --create kargo
+pulumi stack select --create usrbinkat/kargo/optiplexprime
 
 # Explicitly set kube context
 pulumi config set --path kubernetes.context usrbinkat-optiplexprime
 
+# Enable Ubuntu VM instance
+pulumi config set --path vm.enabled true
+
 # Deploy Kargo Platform (note: repeat command until all resources are healthy)
-pulumi up --skip-preview --refresh=true; pulumi up --skip-preview --refresh=true; pulumi up --skip-preview --refresh=true
+pulumi up --skip-preview --refresh=true --continue-on-error; pulumi up --skip-preview --refresh=true --continue-on-error; pulumi up --skip-preview --refresh=false
 ```
 
 ### 7. **Deploy a Virtual Machine:**
@@ -132,7 +175,7 @@ Deploy an Ubuntu Virtual Machine on the platform using Kubevirt.
 
 > **Note:** Run this step manually via integrated terminal.
 
-```bash {"excludeFromRunAll":"true","id":"","name":"vm"}
+```bash {"excludeFromRunAll":"true","id":"01J6MTD7H4YQQQ41E9G20FW0GC","name":"vm"}
 # Enable the VM instance
 pulumi config set --path vm.enabled true
 
@@ -142,7 +185,7 @@ pulumi up --skip-preview --refresh=false
 
 ### 8. Deploy a tenant talos cluster
 
-```bash
+```bash {"id":"01J6MTD7H4YQQQ41E9G4KQ6F5E"}
 # change to the tenant talos dev directory
 cd metal/dev
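A note on the ESC wiring above: because the environment maps `kubernetes.kubeconfig` into `pulumiConfig.kubeconfig`, the Pulumi program can build its Kubernetes provider straight from config with no local file handling. A minimal sketch of the consuming side (the provider resource name is illustrative):

```python
import pulumi
import pulumi_kubernetes as k8s

config = pulumi.Config()

# Pulumi ESC injects `kubeconfig` via pulumiConfig, so no file I/O is needed here.
kubeconfig = config.require("kubeconfig")
k8s_provider = k8s.Provider("omni-optiplexprime", kubeconfig=kubeconfig)
```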
diff --git a/pulumi/__main__.py b/pulumi/__main__.py
index f83b6e3..1866716 100644
--- a/pulumi/__main__.py
+++ b/pulumi/__main__.py
@@ -18,6 +18,7 @@ from src.kv_manager.deploy import deploy_ui_for_kubevirt
 from src.ceph.deploy import deploy_rook_operator
 from src.vm.ubuntu import deploy_ubuntu_vm
+from src.vm.talos import deploy_talos_cluster
 
 ##################################################################################
 # Load the Pulumi Config
 ##################################################################################
@@ -74,29 +75,7 @@ def get_module_config(module_name):
 config_kubernetes_dashboard, kubernetes_dashboard_enabled = get_module_config('kubernetes_dashboard')
 config_kubevirt_manager, kubevirt_manager_enabled = get_module_config('kubevirt_manager')
 config_vm, vm_enabled = get_module_config('vm')
-
-##################################################################################
-## Get the Kubernetes API endpoint IP
-##################################################################################
-
-# check if kubernetes distribution is "kind" and if so execute get the kubernetes api endpoint ip
-if kubernetes_distribution == "kind":
-    # Get the Kubernetes API endpoint IP
-    k8s_endpoint_ip = KubernetesApiEndpointIp(
-        "kubernetes-endpoint-service-address",
-        k8s_provider
-    )
-
-    # Extract the Kubernetes Endpoint clusterIP
-    kubernetes_endpoint_service = pulumi.Output.from_input(k8s_endpoint_ip)
-    kubernetes_endpoint_service_address = kubernetes_endpoint_service.endpoint.subsets[0].addresses[0].ip
-    pulumi.export(
-        "kubernetes-endpoint-service-address",
-        kubernetes_endpoint_service_address
-    )
-else:
-    # default to talos k8s endpoint "localhost" when not kind k8s
-    kubernetes_endpoint_service_address = "localhost"
+config_talos, talos_cluster_enabled = get_module_config('talos')
 
 ##################################################################################
 ## Core Kargo Kubevirt PaaS Infrastructure
@@ -107,8 +86,6 @@ def get_module_config(module_name):
 def safe_append(depends, resource):
     if resource:
         depends.append(resource)
-    #else:
-    #    pulumi.log.warn("Attempted to append a None resource to depends list.")
 
 ##################################################################################
 # Fetch the Cilium Version
@@ -136,13 +113,14 @@ def run_cilium():
 
         safe_append(depends, cilium_release)
 
+        versions["cilium"] = {"enabled": cilium_enabled, "version": cilium_version}
+
         return cilium_version, cilium_release
+
     return None, None
 
 cilium_version, cilium_release = run_cilium()
 
-versions["cilium"] = {"enabled": cilium_enabled, "version": cilium_version}
-
 ##################################################################################
 # Fetch the Cert Manager Version
 # Deploy Cert Manager
@@ -470,12 +448,10 @@ def run_kubevirt_manager():
 
 # Deploy Ubuntu VM
 def run_ubuntu_vm():
     if vm_enabled:
-        # Get the SSH Public Key string from Pulumi Config if it exists
-        # Otherwise, read the SSH Public Key from the local filesystem
         ssh_pub_key = config.get("ssh_pub_key")
         if not ssh_pub_key:
-            # Get the SSH public key
+            # Get the SSH public key from the local filesystem
             with open(f"{os.environ['HOME']}/.ssh/id_rsa.pub", "r") as f:
                 ssh_pub_key = f.read().strip()
@@ -508,10 +484,46 @@ def run_ubuntu_vm():
         safe_append(depends, ubuntu_ssh_service)
 
         return ubuntu_vm, ubuntu_ssh_service
-
-    return None, None
+    else:
+        return None, None
 
 ubuntu_vm, ubuntu_ssh_service = run_ubuntu_vm()
 
+##################################################################################
+# Deploy Kargo-on-Kargo Development Cluster (Controlplane + Worker VirtualMachinePools)
+def run_talos_cluster():
+    if talos_cluster_enabled:
+        # Append the resources to the `depends` list
+        custom_depends = []
+
+        # depends on cert manager, multus
+        safe_append(custom_depends, cert_manager_release)
+        safe_append(custom_depends, multus_release)
+        if cdi_enabled:
+            safe_append(custom_depends, cdi_release)
+
+        # Deploy the Talos cluster (controlplane and workers)
+        controlplane_vm_pool, worker_vm_pool = deploy_talos_cluster(
+            config_talos=config_talos,
+            k8s_provider=k8s_provider,
+            depends_on=custom_depends,
+            parent=kubevirt_operator,
+        )
+
+        # Export the Talos configuration and versions
+        versions["talos_cluster"] = {
+            "enabled": talos_cluster_enabled,
+            "running": config_talos.get("running", True),
+            "controlplane": config_talos.get("controlplane", {}),
+            "workers": config_talos.get("workers", {})
+        }
+
+        return controlplane_vm_pool, worker_vm_pool
+    else:
+        return None, None
+
+# Run the Talos cluster deployment
+talos_controlplane_vm_pool, talos_worker_vm_pool = run_talos_cluster()
+
 # Export the component versions
 pulumi.export("versions", versions)
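`get_module_config` itself is defined earlier in `pulumi/__main__.py` and is not part of this diff; a plausible sketch of the convention the new `config_talos, talos_cluster_enabled` line relies on — each `kargo:<module>` config object carries its own `enabled` flag:

```python
import pulumi

config = pulumi.Config()

def get_module_config(module_name: str):
    """Sketch of the helper used above: returns (module_config, enabled) for a
    `kargo:<module>` config object; the real definition lives earlier in
    pulumi/__main__.py and may differ in detail."""
    module_config = config.get_object(module_name) or {}
    enabled = str(module_config.get("enabled", False)).lower() == "true"
    return module_config, enabled

config_talos, talos_cluster_enabled = get_module_config("talos")
```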
+ """ + + # Get configurations for controlplane and workers, with defaults applied + controlplane_config = get_talos_config(config_talos.get("controlplane", {}), "controlplane") + worker_config = get_talos_config(config_talos.get("workers", {}), "workers") + + # Apply the running flag to both configurations + controlplane_config["running"] = config_talos.get("running", True) + worker_config["running"] = config_talos.get("running", True) + + # Deploy the Talos controlplane + controlplane_vm_pool = deploy_talos_cluster_controlplane( + config_vm=controlplane_config, + k8s_provider=k8s_provider, + depends_on=depends_on, + parent=parent + ) + + # Deploy the Talos workers (if replicas > 0) + worker_vm_pool = None + if worker_config["replicas"] > 0: + worker_vm_pool = deploy_talos_cluster_workers( + config_vm=worker_config, + k8s_provider=k8s_provider, + depends_on=depends_on, + parent=parent + ) + + return controlplane_vm_pool, worker_vm_pool + +def get_talos_config( + config_talos_cluster: dict, + node_type: str + ) -> dict: + """ + Generate the Talos cluster configuration by merging common default values with node-specific config. + """ + # Common default configuration for both controlplane and workers + common_talos_defaults = { + "namespace": "default", + "image": config_talos_cluster.get("image", "docker.io/containercraft/talos:1.7.6"), + "network_name": "br0", # Default network + "running": True # Default running state + } + + # Set vm_pool_name for controlplane and workers + vm_pool_name = f"kargo-dev-{node_type}" + + # Handle controlplane configuration + if node_type == "controlplane": + controlplane_replicas = 1 # Default to single + controlplane_config = config_talos_cluster.get('replicas', 'single') + + if controlplane_config == 'single': + controlplane_replicas = 1 + elif controlplane_config == 'ha': + controlplane_replicas = 3 + else: + pulumi.log.error(f"Unrecognized controlplane replica config. Expected 'single' or 'ha', got: {controlplane_config}") + raise ValueError(f"Invalid controlplane config: {controlplane_config}") + + # Controlplane-specific defaults and configuration overrides + controlplane_defaults = { + "replicas": controlplane_replicas, + "cpu_cores": config_talos_cluster.get("cpu_cores", 1), + "memory_size": config_talos_cluster.get("memory_size", "2"), # Memory in GiB + "root_disk_size": config_talos_cluster.get("root_disk_size", "32"), # Root disk size in GiB + "empty_disk_size": config_talos_cluster.get("empty_disk_size", "0"), # Empty disk size in GiB + "vm_pool_name": vm_pool_name + } + return {**common_talos_defaults, **controlplane_defaults} + + # Handle worker configuration + elif node_type == "workers": + worker_defaults = { + "replicas": config_talos_cluster.get("replicas", 0), # Worker replicas + "cpu_cores": config_talos_cluster.get("cpu_cores", 2), # Worker CPU cores + "memory_size": config_talos_cluster.get("memory_size", "2"), # Worker memory in GiB + "root_disk_size": config_talos_cluster.get("root_disk_size", "32"), # Root disk size in GiB + "empty_disk_size": config_talos_cluster.get("empty_disk_size", "16"), # Empty disk size in GiB + "vm_pool_name": vm_pool_name + } + return {**common_talos_defaults, **worker_defaults} + + else: + raise ValueError(f"Unsupported node type: {node_type}") + +def deploy_talos_cluster_controlplane( + config_vm, + k8s_provider: k8s.Provider, + depends_on: pulumi.Output[list], + parent + ): + """ + Deploy the Talos cluster controlplane with specific configuration. 
+ """ + vm_pool_spec = generate_talos_vmpool_spec( + vm_pool_name=config_vm["vm_pool_name"], + namespace=config_vm["namespace"], + replicas=config_vm["replicas"], # Controlplane replicas + cpu_cores=config_vm["cpu_cores"], + memory_size=config_vm["memory_size"], + root_disk_size=config_vm["root_disk_size"], + empty_disk_size=config_vm["empty_disk_size"], + image_address=config_vm["image"], + network_name=config_vm["network_name"], + running=config_vm["running"] + ) + + controlplane_vm_pool = k8s.apiextensions.CustomResource( + f"{config_vm['vm_pool_name']}", + api_version="pool.kubevirt.io/v1alpha1", + kind="VirtualMachinePool", + metadata=k8s.meta.v1.ObjectMetaArgs( + name=f"{config_vm['vm_pool_name']}", + namespace=config_vm["namespace"], + ), + spec=vm_pool_spec, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + #depends_on=depends_on, + parent=parent + ) + ) + + return controlplane_vm_pool + +def deploy_talos_cluster_workers( + config_vm, + k8s_provider: k8s.Provider, + depends_on, + parent + ): + """ + Deploy the Talos workers with their specific configuration. + """ + if config_vm["replicas"] > 0: + worker_vm_pool_spec = generate_talos_vmpool_spec( + vm_pool_name=config_vm["vm_pool_name"], + namespace=config_vm["namespace"], + replicas=config_vm["replicas"], + cpu_cores=config_vm["cpu_cores"], + memory_size=config_vm["memory_size"], + root_disk_size=config_vm["root_disk_size"], + empty_disk_size=config_vm["empty_disk_size"], + image_address=config_vm["image"], + network_name=config_vm["network_name"], + running=config_vm["running"] + ) + + worker_vm_pool = k8s.apiextensions.CustomResource( + f"{config_vm['vm_pool_name']}-workers", + api_version="pool.kubevirt.io/v1alpha1", + kind="VirtualMachinePool", + metadata=k8s.meta.v1.ObjectMetaArgs( + name=f"{config_vm['vm_pool_name']}", + namespace=config_vm["namespace"], + ), + spec=worker_vm_pool_spec, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + #depends_on=depends_on, + parent=parent + ) + ) + else: + worker_vm_pool = None + + return worker_vm_pool + +def generate_talos_vmpool_spec( + vm_pool_name: str, + namespace: str, + replicas: int, + cpu_cores: int, + memory_size: str, + root_disk_size: str, + empty_disk_size: str, + image_address: str, + network_name: str, + running: bool + ) -> dict: + """ + Generate the VirtualMachinePool spec for Talos VMs. 
+ """ + # Ensure the correct image is passed here + docker_image_address = f"docker://{image_address}" + + # Initialize the spec with a root disk data volume template + spec = { + "replicas": replicas, + "selector": { + "matchLabels": { + "kubevirt.io/vmpool": vm_pool_name + } + }, + "virtualMachineTemplate": { + "metadata": { + "labels": { + "kubevirt.io/vmpool": vm_pool_name + } + }, + "spec": { + "running": running, + "template": { + "metadata": { + "labels": { + "kubevirt.io/vmpool": vm_pool_name + } + }, + "spec": { + "networks": [ + { + "name": "eth0", + "multus": { + "networkName": network_name + } + } + ], + "domain": { + "cpu": { + "cores": cpu_cores # Use configured CPU cores + }, + "resources": { + "requests": { + "memory": f"{memory_size}Gi" # Use configured memory size + } + }, + "devices": { + "disks": [ + { + "name": "talos-root-disk", + "bootOrder": 1, + "disk": { + "bus": "virtio" + } + } + ], + "interfaces": [ + { + "name": "eth0", + "bridge": {} + } + ] + } + }, + "volumes": [ + { + "name": "talos-root-disk", + "dataVolume": { + "name": f"{vm_pool_name}-root-dv" + } + } + ] + } + }, + "dataVolumeTemplates": [ + { + "metadata": { + "name": f"{vm_pool_name}-root-dv" + }, + "spec": { + "storage": { + "accessModes": ["ReadWriteOnce"], + "resources": { + "requests": { + "storage": f"{root_disk_size}Gi" # Use configured root disk size + } + } + }, + "source": { + "registry": { + "url": docker_image_address, # Ensure the correct image URL is propagated here + } + } + } + } + ] + } + } + } + + # If the empty disk size is greater than 0, add the empty disk to the spec + if int(empty_disk_size) > 0: + spec["virtualMachineTemplate"]["spec"]["template"]["spec"]["domain"]["devices"]["disks"].append( + { + "name": "talos-empty-disk", + "disk": { + "bus": "virtio" + } + } + ) + + spec["virtualMachineTemplate"]["spec"]["template"]["spec"]["volumes"].append( + { + "name": "talos-empty-disk", + "dataVolume": { + "name": f"{vm_pool_name}-empty-dv" + } + } + ) + + # Append the empty disk data volume template + spec["virtualMachineTemplate"]["spec"]["dataVolumeTemplates"].append( + { + "metadata": { + "name": f"{vm_pool_name}-empty-dv" + }, + "spec": { + "storage": { + "accessModes": ["ReadWriteOnce"], + "resources": { + "requests": { + "storage": f"{empty_disk_size}Gi" # Use configured empty disk size + } + } + }, + "source": { + "blank": {} + } + } + } + ) + + return spec diff --git a/pulumi/stacks/Pulumi.ci.yaml b/pulumi/stacks/Pulumi.ci.yaml deleted file mode 100644 index faf5eb8..0000000 --- a/pulumi/stacks/Pulumi.ci.yaml +++ /dev/null @@ -1,14 +0,0 @@ -config: - kargo:cilium: - enabled: false - kargo:kubernetes: - context: admin@talos-kargo-docker - distribution: talos - kubeconfig: /workspaces/Kargo/.kube/config - kargo:kubevirt: - emulation: false - enabled: true - kargo:multus: - enabled: false - kargo:vm: - enabled: false diff --git a/pulumi/stacks/Pulumi.ci.yaml.bak b/pulumi/stacks/Pulumi.ci.yaml.bak deleted file mode 100644 index 788a24d..0000000 --- a/pulumi/stacks/Pulumi.ci.yaml.bak +++ /dev/null @@ -1,29 +0,0 @@ -config: - kargo:cdi.enabled: "false" - kargo:cdi.version: 1.59.0 - kargo:cert_manager.enabled: "false" - kargo:cert_manager.version: 1.14.5 - kargo:cilium.enabled: "true" - kargo:cnao.enabled: "false" - kargo:cnao.version: 0.79.5 - kargo:hostpath_provisioner.enabled: "false" - kargo:hostpath_provisioner.version: 0.19.0 - kargo:kubernetes: kind - kargo:kubernetes.context: kind-kargo - kargo:kubernetes.distribution: kind - kargo:kubernetes.kubeconfig: 
diff --git a/pulumi/stacks/Pulumi.ci.yaml b/pulumi/stacks/Pulumi.ci.yaml
deleted file mode 100644
index faf5eb8..0000000
--- a/pulumi/stacks/Pulumi.ci.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-config:
-  kargo:cilium:
-    enabled: false
-  kargo:kubernetes:
-    context: admin@talos-kargo-docker
-    distribution: talos
-    kubeconfig: /workspaces/Kargo/.kube/config
-  kargo:kubevirt:
-    emulation: false
-    enabled: true
-  kargo:multus:
-    enabled: false
-  kargo:vm:
-    enabled: false
diff --git a/pulumi/stacks/Pulumi.ci.yaml.bak b/pulumi/stacks/Pulumi.ci.yaml.bak
deleted file mode 100644
index 788a24d..0000000
--- a/pulumi/stacks/Pulumi.ci.yaml.bak
+++ /dev/null
@@ -1,29 +0,0 @@
-config:
-  kargo:cdi.enabled: "false"
-  kargo:cdi.version: 1.59.0
-  kargo:cert_manager.enabled: "false"
-  kargo:cert_manager.version: 1.14.5
-  kargo:cilium.enabled: "true"
-  kargo:cnao.enabled: "false"
-  kargo:cnao.version: 0.79.5
-  kargo:hostpath_provisioner.enabled: "false"
-  kargo:hostpath_provisioner.version: 0.19.0
-  kargo:kubernetes: kind
-  kargo:kubernetes.context: kind-kargo
-  kargo:kubernetes.distribution: kind
-  kargo:kubernetes.kubeconfig: /workspaces/Kargo/.kube/config
-  kargo:kubernetes_dashboard.enabled: "false"
-  kargo:kubernetes_dashboard.version: 7.4.0
-  kargo:kubevirt.enabled: "false"
-  kargo:kubevirt.version: 1.2.0
-  kargo:kubevirt_maager.enabled: "false"
-  kargo:multus.enabled: "true"
-  kargo:openunison.dns_suffix: kargo.arpa
-  kargo:openunison.enabled: "false"
-  kargo:openunison.github.client_id: Ov23li28jxV3tmAviMVl
-  kargo:openunison.github.client_secret:
-    secure: AAABAEQGM+PYpnW0+MOeMdrRc1byEVg3Hp+aMPyB9BluzfjjpGeus1U4hhjf0fXt1xTmXrAJ8H6iGpzQmGAFi6ARgYZ3I/gd
-  kargo:openunison.github.teams: ContainerCraft/kubeadmin,ContainerCraft/
-  kargo:openunison.version: 3.0.11
-  kargo:prometheus.enabled: "false"
-  kargo:prometheus.version: 58.7.2
diff --git a/pulumi/stacks/Pulumi.mordor.yaml b/pulumi/stacks/Pulumi.mordor.yaml
deleted file mode 100644
index 7c98452..0000000
--- a/pulumi/stacks/Pulumi.mordor.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-config:
-  kargo:kubernetes:
-    context: usrbinkat-optiplexprime
-    kubeconfig: /tmp/esc-1934694674
-  kargo:cilium:
-    enabled: false
-  kargo:cert_manager:
-    enabled: false
-    version: 1.15.1
-  kargo:kubevirt:
-    enabled: false
-    version: 1.3.0
-  kargo:hostpath_provisioner:
-    enabled: false
-    version: 0.19.0
-    default_storage_class: true
-  kargo:prometheus:
-    enabled: false
-    version: 61.3.2
-  kargo:cdi:
-    enabled: false
-    version: 1.59.0
-  kargo:multus:
-    enabled: false
-  kargo:cnao:
-    enabled: false
-    version: 0.94.2
-  kargo:kubernetes_dashboard:
-    enabled: false
-    version: 7.4.0
-  kargo:openunison:
-    enabled: false
-    version: 3.0.11
-    dns_suffix: kargo.arpa
-    github:
-      client_id: Ov23li28jxV3tmAviMVl
-      teams: ContainerCraft/kubeadmin,ContainerCraft/
diff --git a/pulumi/stacks/Pulumi.optiplexprime.yaml b/pulumi/stacks/Pulumi.optiplexprime.yaml
new file mode 100644
index 0000000..39b66a5
--- /dev/null
+++ b/pulumi/stacks/Pulumi.optiplexprime.yaml
@@ -0,0 +1,24 @@
+config:
+  kargo:kubernetes:
+    context: usrbinkat-optiplexprime            # Kubernetes context for the stack
+  kargo:talos:
+    controlplane:
+      cpu_cores: 1                              # Controlplane CPU cores
+      empty_disk_size: "0"                      # Controlplane empty disk size (0 for no empty disk)
+      image: docker.io/containercraft/omni:1.7.6  # Image to use for controlplane
+      memory_size: "2"                          # Controlplane memory in GiB
+      network_name: "br0"                       # Multus network name for controlplane
+      replicas: ha                              # Specifies 'single' (1) or 'ha' (3) replicas
+      root_disk_size: "32"                      # Controlplane root disk size in GiB
+    enabled: true                               # Enable Talos deployment
+    running: false                              # Kargo-on-Kargo Dev Cluster Running/Stopped
+    workers:
+      cpu_cores: 3                              # Worker CPU cores
+      empty_disk_size: "16"                     # Extra disk size in GiB, set 0 to disable
+      image: docker.io/containercraft/omni:1.7.6  # Image to use for workers
+      memory_size: "6"                          # Worker memory in GiB
+      network_name: "br0"                       # Multus network name for workers
+      replicas: 2                               # Worker replicas, set 0 to disable workers vmpool
+      root_disk_size: "64"                      # Root disk size in GiB
+  kargo:vm:                                     # Ubuntu VM deployment configuration
+    enabled: false                              # Disable VM deployment (set to true if needed)
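As a sanity check on the config plumbing, this is what `get_talos_config` returns for the `workers` block of `Pulumi.optiplexprime.yaml` above (values traced by hand from this diff; a sketch for illustration, not part of the change):

```python
from src.vm.talos import get_talos_config

# The kargo:talos.workers object as set in Pulumi.optiplexprime.yaml.
workers_stack_config = {
    "cpu_cores": 3,
    "empty_disk_size": "16",
    "image": "docker.io/containercraft/omni:1.7.6",
    "memory_size": "6",
    "network_name": "br0",
    "replicas": 2,
    "root_disk_size": "64",
}

worker_config = get_talos_config(workers_stack_config, "workers")
assert worker_config["replicas"] == 2
assert worker_config["memory_size"] == "6"  # rendered as "6Gi" in the pool spec
assert worker_config["vm_pool_name"] == "kargo-dev-workers"
assert worker_config["image"] == "docker.io/containercraft/omni:1.7.6"
```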