From 27c72f00624fdffa2548286874d7b14c59f57358 Mon Sep 17 00:00:00 2001 From: Kat Morgan Date: Thu, 29 Aug 2024 04:14:31 +0000 Subject: [PATCH] working 3 node bare metal build/destroy/rebuild from scratch --- Pulumi.yaml | 4 +- docs/example.machineconfig.yaml | 38 ++-- docs/hack/pod-pvc-test.yaml | 16 +- docs/hack/ubuntu-br0-persistent.yaml | 55 ++--- docs/hack/ubuntu-br0.yaml | 4 +- docs/hack/wipe.yaml | 207 ++++++++++-------- docs/metal/optiplexprime/cluster.yaml | 6 +- .../optiplexprime/{op1.yaml => machine.yaml} | 46 +--- docs/metal/optiplexprime/op2.yaml | 52 ----- docs/metal/optiplexprime/op3.yaml | 52 ----- docs/metal/optiplexprime/patch.yaml | 32 +-- pulumi/src/hostpath_provisioner/deploy.py | 1 + 12 files changed, 186 insertions(+), 327 deletions(-) rename docs/metal/optiplexprime/{op1.yaml => machine.yaml} (50%) delete mode 100644 docs/metal/optiplexprime/op2.yaml delete mode 100644 docs/metal/optiplexprime/op3.yaml diff --git a/Pulumi.yaml b/Pulumi.yaml index 6b77d70..6b9ed04 100644 --- a/Pulumi.yaml +++ b/Pulumi.yaml @@ -28,11 +28,11 @@ config: cert_manager: value: enabled: true - version: 1.15.1 + version: 1.15.3 kubevirt: value: enabled: true - version: 1.3.0 + version: 1.3.1 hostpath_provisioner: value: enabled: true diff --git a/docs/example.machineconfig.yaml b/docs/example.machineconfig.yaml index 7ba38e8..770ed89 100644 --- a/docs/example.machineconfig.yaml +++ b/docs/example.machineconfig.yaml @@ -18,10 +18,10 @@ machine: # Used to provide additional options to the kubelet. kubelet: - image: ghcr.io/siderolabs/kubelet:v1.30.3 # The `image` field is an optional reference to an alternative kubelet image. + image: ghcr.io/siderolabs/kubelet:v1.30.4 # The `image` field is an optional reference to an alternative kubelet image. defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile. disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory. - + # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list. # clusterDNS: # - 10.96.0.10 @@ -82,7 +82,7 @@ machine: # gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route). # metric: 1024 # The optional metric for the route. # mtu: 1500 # The interface's MTU. - # + # # # # Picks a network device using the selector. # # # select a device with bus prefix 00:*. @@ -182,7 +182,7 @@ machine: disk: /dev/sda # The disk used for installations. image: ghcr.io/siderolabs/installer:v1.7.6 # Allows for supplying the image used to perform the installation. wipe: false # Indicates if the installation disk should be wiped at installation time. - + # # Look up disk using disk attributes like model, size, serial and others. # diskSelector: # size: 4GB # Disk size. @@ -213,12 +213,12 @@ machine: # # The TLS configuration for the registry. # tls: # insecureSkipVerify: true # Skip TLS server certificate verification (not recommended). - # + # # # # Enable mutual TLS authentication with the registry. # # clientIdentity: # # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t # # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ== - # + # # # # The auth configuration for this registry. # # auth: # # username: username # Optional registry authentication. @@ -237,7 +237,7 @@ machine: # Configures host DNS caching resolver. hostDNS: enabled: true # Enable host DNS caching resolver. - + # # Configure Talos API access from Kubernetes pods. 
# kubernetesTalosAPIAccess: # enabled: true # Enable Talos API access from Kubernetes pods. @@ -247,7 +247,7 @@ machine: # # The list of Kubernetes namespaces Talos API access is available from. # allowedKubernetesNamespaces: # - kube-system - + # # Provides machine specific control plane configuration options. # # ControlPlane definition example. @@ -280,7 +280,7 @@ machine: # # A list of partitions to create on the disk. # partitions: # - mountpoint: /var/mnt/extra # Where to mount the partition. - # + # # # # The size of partition: either bytes or human readable representation. If `size:` is omitted, the partition is sized to occupy the full disk. # # # Human readable representation. @@ -344,11 +344,11 @@ machine: # - # Deterministically generated key from the node UUID and PartitionLabel. # nodeID: {} # slot: 0 # Key slot number for LUKS2 encryption. - # + # # # # KMS managed encryption key. # # kms: # # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key. - # + # # # # Cipher kind to use for the encryption. Depends on the encryption provider. # # cipher: aes-xts-plain64 @@ -414,7 +414,7 @@ cluster: # The service subnet CIDR. serviceSubnets: - 10.96.0.0/12 - + # # The CNI used. # cni: # name: custom # Name of CNI to use. @@ -436,7 +436,7 @@ cluster: key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBMjdlaEc2M0hubHJkakpWWFdlUXdxTE9PN2duVmpEaVByd1orMVd5QTBHZ0t0TXhHClZYNk5NKzJhYzNHZkQrWW1uU3pHM0NWU3lDMXp1RGpuaE1DSFZTZWtDM3Fyck5pWVBEbk5WcEY1NDBVeERYVEUKNGNyUzNvUUMycitiQnU1M2VDYk80cm9XbHFuV0wybmxNay9Ka2xrUDcvaisxSnRwUlVNUkpVMVZwcFhFcDRjRApsVW9KeU5SSmFVM2FlQXhFbUxwQUdkSndTNWJGalZwSVU1TmM4Qml0MUJaWVlZaml1SmJsc3JENXBDeEFUczQxCm1mUEg2cm9SbVNZZFVETkYrM0JaVzNYRkw5blNZT25vcUxtWTVsN283V1JCVk9EemtxbDhHYVF6L3hxRGNEMjYKeTE1VWFjc0tvYnJXWGhITzlRK2tsWStlRHZQaytqYStRZGljN1hneDgzV2FlTWIzTjI4WmVjc1RwMWhFcGNnVgpIaVZNV0k1MU5UVWRjSEpQQ2lqQUlYZ1pyS2tSSEwxWW01YU04Q0duRXIvMjFLK3pGd3ljZFlKY0p6dzlNd3ZmCk1QQXNibW9rZHJSc0lZYjhKYWNPa1VUU0NmSGlVRVBKRkpnR0d3Z25oZUFLTzR1bWhSY08wRm1OYnM2bDA0UTEKNGU0aWNKRjV1WHdQbkpmT3k3WVN0TTlIdXNEdndyaTlGREJUaHFYb2t4aDZBR3hqaVpESEtBeVJ4ZGdWQVFEZwp3NzVKc0VhOGtBZng4TWJNSHF3emRBMldMeE8vVi9kc2VnRE4valBLTWV4dDF6V0NVYmdZR3lST0dNWG5lWHdtCkR1USswa1VnaEpWeVZFcFA5aWFHNjJuOGhaYVVJQnhKYWZiY1cvWDlDZU5nV0Rwc25JM1ZMQ3Z6cVBzQ0F3RUEKQVFLQ0FnQmthb2czSGpVZEttb1cxaW9WNnpSUjViNXREQkdOQkV5bXNWRkdPVkNsN3ViMXVtWnFyR3ZjVDd0SApCNmFxbllVSVVHNlNMbnluQUI3akV4NjhqVUZNcWRGUGJjRUlQZlZWa1JWSGV6VTVxV1hCd0p3NmxYZktPb3NUCmJuNXJySkR0V004MHo5ZURCQnZmajRZd2VFYW5uQXlzb0IvWEN6QVJkNVZlZHZtdUtNUzN5L3pYUGY4NnljckQKVmRHRERCaE50TkdXKzVaVzlaRlVDTzhGUTFRMlp4RUszU3VIYUFnMlZNQ2w2and0ekF1WCtLYUd0ZU9vWTJHbgpuTVBJNDZZWVJFQ1NqRUNTbmZxSVVGOEV2YzRqZXp6NVFkaVo4alZmMGhYbG9pWWhjUHloVDVVUFhRMDNUSlNQClZkU2pLWUJzZjdtNG9PcWI2dHU3ZjBsMHpvcnBKQXEwRllPRCtjMGJnT3J3SVE3Z0J3eSs2bUhFVGJyOEd1aSsKT1Z3NmJ5WEVwa09GNHZFWUlkMFo1a1JVYU5YVC9lMHRSTUtPcUJLT3BoNWFOODlBY3RXTkVQbytUd0dZVjJjMApSRjdDZ2xtNjVPNkJBMFJXVnhZazdKaW1YanlCZENiaFFRMktWKytuTzUwbDk4alAyR1NTNTBTVzVYZ1pPTFRmClJPaStCWkE2dWJnSkxNak15UG5uWUNPT05Zbkd4bXVEWGk4Vlg3bnNGY2RQS3NXTDZjTG5MQWFYTzhvcEY0TlEKVy9PWWlRdGhOZEI0NFR6QjB0MFZMQmI0MWI4dStxb01HOWM0cFJxdXZmUjFPL1NjTjdiWHlnVjFzUEsyaFBxRApBNkV4aE5wZ0VmVlI0MmRLcUJpVUQ5WWFXMXFvVllodVBNUCtxVGg1Z0xCWEovbVFBUUtDQVFFQTQwclZ1WEFrCm1lQ3lRWHVKdmNPN1NFdEIrbFB3SGdnYzBEMHlZb09xTktKWEZNYkJpeUJubGRIQVBFdEROZlNwQTFpUEU5ME8KeFNjRisrMENVb292anZpbnZVVXh1eGlGV3RwejN1RDlCcTJybkZSK2RwdkpybDJSMU1mVXE4blFUU2luNHdDdgpQbFA0dkloSG5RRHlMLy9GVlEvZ3NpU2ZaVUprNGdkSHhxNEFMQm9MZ1gwWFFwOEhwVlk4ZU9oUmdDb1VUcUpKCk1CREQ1OGYwKzBra0N6aUhiRkx4UUQzVzRiTnNDanlXN
EJPY0R1K2RNeUN1SEZLeEdKamdBODJDakJNbGd0c0QKVVdhMDZQR205K1F2QTB2Q3lzWE5QKzE2WENBaW5oTEFoR25zdnpOdjl0N2hNYVFJVUVVYjV6NmtvQndvVVNBcQp5SXBnK0ozajFnOWhLUUtDQVFFQTkzZmVBYlVORm9POVpuTm1HbkxyNEFCek1oLzEwOXFUbjlHQmIrbTk4RVZyClVTdFNkLzJ3SUhlOVM3dmk3blMvMXFONmZrcEtlWlJaNFVJTG5ONFJYazk4TldCdUs4ZUtOcTEzaFlTRkNlbE4KdnVMR1dJOHh6cUFnM3ltUTlUeVJiUG5DMS96ZWxId2d4U2VkV0F2ZjVEZThsQ1R0THJ3djcyQ3l2WVVHTXV1Nwpselp5MWRmU1lseGFDRjQ5eXVmMUFrNUdYUnFPb1dPZzRGbzc4RWxDTjJhNUxkOU1kSFl6WFArQUt4T0krUU1XCjErclRKWHpjdXYxRVZKcGxvK2hmejI3SkZIdzVxb0c3eHpEMDJvMURGenRPd1AxQUZUUUR2ZXB2SEVLdHRFcnEKOEhGQzVYKzF4Rno3QWM5dWRYUWNxc3VEL1FqMy82SjdwQ3l2Z3lhSmd3S0NBUUVBcUs4Yld0cHRiMzRoRFJRZwpMNk9aMUpkMXlkQ052ZHdMZGtoVW9tUlE1MnNyOFpGb2hsWndTZkkzRndoTVF6am1Ua05sUERxdEFZaHFtb05WClcraU9UeThmWGdxQ1UvWm8wZnFhVCtqdUlBeTM5ZmVITzROMjlGOGhJYWVFb0ZIUXM5TGVka3hGd3A0VFZlaFcKTGFGTUhHOVlCajhZWlYxa3BvUFYxeUhNRlJtQnF6bFJITEszbG92cjRyclRlbkFBUEh2K1FqV1MxNGM1dlVUYQovYnRVY3JkcldldGNmRzdva096VjIrc0hsaGsza3FZbUpwRzBaZlRBMWg1dThxT01wV1I0VEZzNlpTa3lUa2M1CndYT3BxR3FMa0cyb1hDOWt4Z1piYXJGRUM0R0pZSVZPTFF0UVdjVUJIb0dMcWQwRjFJWFJMaFFQYkhHMmNnekwKMEV2dHNRS0NBUUVBemprUHd2Qnp4YTFuS25kRVFRby8wbDNOb1puTXJBMWNORFRNM3N3MHdvTzNHTjgzVEZNVwpjUmdYb1lNamEzL1RWalRKb1hDYmU1SExETktUQnRod05OMmNMb2I3aUhReWs3b2UvaDhmRnRGYlBwV1FvTUJECnkyTFp3ZDdMcy9JVFpuTUVraVZoL3U4cUIrSXBWdlFlVWZxNHBlUFgyN2hyWWRwOVdUMy9tbk9qMGpGSHBUakQKVThDT0xxY0thNHE5UEh5cVduQUZLQ3owSmNKTWJNTS84UmNqZmp5MVkrQVVrWTFCNjZQTjQyVlZwZjFFazlybgp2dFBNRzR6RzRyNkZDVUhUUThUNThVTkhqb2tYb3dGUlJZemc1aWFsUG9Zb0VCM0xGZ3ZpNFhvRkt2WFh2NVNiCmp0WEZQdzd6aCtHSStqTzYvQ1I3Tm9iYTdrZG13NjViclFLQ0FRRUFqbDFML1dIQmpCUGYwM2FweFkzT3Q5ZWkKLzE1UEtYQVVnc3JpaFNOd3UxSERuNUcrT1VtUldWTWlncTlzWWNGWWdhSm4zNTliczJpL1RheDE4ZWdTZVhBagozSDhQQWRIRGVGSEI0K1ZmdVF5aCtxTXAxYWJQU2ZwWnV3dXY5VzNvSkpNcUhkQUo2bmtCMG9QNkN2TDlGeldCCkRzZ0dnZlFGTE9Ud1pucXVHZ1J1NGxuVmZabGliQzZlSnlBelZydXFDd2JqOHBFKzEwSFd1eUV1d1BqRURKVnoKVHFEa3B2UHRxMUdLK2lpeWk4emcvOXgzdzNyaGpOZENFRmhNZ3RtdHM4MXdHdVpxY1RqczB6eXd0cm1vYzJBSQpaWHllaUMvdk5vSW5pSzVRMWlqSHNVcnAzWVNPSCtEdmo3Skc0djM5ZWwyRm9WVjFlaS8ybFV0TUdwZ2hudz09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== # API server specific configuration options. apiServer: - image: registry.k8s.io/kube-apiserver:v1.30.3 # The container image used in the API server manifest. + image: registry.k8s.io/kube-apiserver:v1.30.4 # The container image used in the API server manifest. # Extra certificate subject alternative names for the API server's certificate. certSANs: - 127.0.0.1 @@ -468,16 +468,16 @@ cluster: - level: Metadata # Controller manager server specific configuration options. controllerManager: - image: registry.k8s.io/kube-controller-manager:v1.30.3 # The container image used in the controller manager manifest. + image: registry.k8s.io/kube-controller-manager:v1.30.4 # The container image used in the controller manager manifest. # Kube-proxy server-specific configuration options proxy: - image: registry.k8s.io/kube-proxy:v1.30.3 # The container image used in the kube-proxy manifest. - + image: registry.k8s.io/kube-proxy:v1.30.4 # The container image used in the kube-proxy manifest. + # # Disable kube-proxy deployment on cluster bootstrap. # disabled: false # Scheduler server specific configuration options. scheduler: - image: registry.k8s.io/kube-scheduler:v1.30.3 # The container image used in the scheduler manifest. + image: registry.k8s.io/kube-scheduler:v1.30.4 # The container image used in the scheduler manifest. # Configures cluster member discovery. discovery: enabled: true # Enable the cluster membership discovery feature. 
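Note on the hunks above: this commit moves every Kubernetes component image in docs/example.machineconfig.yaml from v1.30.3 to v1.30.4 — the kubelet under machine.kubelet plus the kube-apiserver, kube-controller-manager, kube-proxy, and kube-scheduler under cluster. Because those five tags are edited by hand in five places, a small consistency check guards against version skew on the next bump. A minimal sketch (a hypothetical helper, not part of this repo), assuming PyYAML and a single-document config at the path used here; the field paths follow the v1alpha1 schema shown above:

    #!/usr/bin/env python3
    """Check that the five kube component images in a Talos machine config
    agree on a single version tag."""
    import sys
    import yaml  # PyYAML

    def component_tags(path):
        """Return {component: image tag} for the kube images in the config."""
        with open(path) as f:
            doc = yaml.safe_load(f)
        images = {
            "kubelet": doc["machine"]["kubelet"]["image"],
            "apiServer": doc["cluster"]["apiServer"]["image"],
            "controllerManager": doc["cluster"]["controllerManager"]["image"],
            "proxy": doc["cluster"]["proxy"]["image"],
            "scheduler": doc["cluster"]["scheduler"]["image"],
        }
        # e.g. "registry.k8s.io/kube-proxy:v1.30.4" -> "v1.30.4"
        return {name: ref.rsplit(":", 1)[1] for name, ref in images.items()}

    if __name__ == "__main__":
        path = sys.argv[1] if len(sys.argv) > 1 else "docs/example.machineconfig.yaml"
        tags = component_tags(path)
        if len(set(tags.values())) > 1:
            sys.exit(f"version skew: {tags}")
        print(f"all components on {tags['kubelet']}")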
@@ -496,7 +496,7 @@ cluster: ca: crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNTZ0F3SUJBZ0lSQUkrSVdIeTk2bUIrdTh4cXhubmNnZ2t3Q2dZSUtvWkl6ajBFQXdJd0R6RU4KTUFzR0ExVUVDaE1FWlhSalpEQWVGdzB5TkRBNE1qZ3lNRFExTXpKYUZ3MHpOREE0TWpZeU1EUTFNekphTUE4eApEVEFMQmdOVkJBb1RCR1YwWTJRd1dUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFUVHdUVFVFcytiCm9tNi9VZlFTZ1dyYzBiOEV1SkJxYy95dk9LcjlsczlSMW9kdUtacHliTzM2UGMyelBaZWk2L0RNeVRHR09VL0wKSC9mc0pqTUJncFZ5bzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSApBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRkZ6Z24yN3h5eStICm4ySEZUYzBEM09nVjNXcy9NQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJRllpUkZheUpCUUF1eGw5VVhrTE9GRmEKcVhIUHlORllrYk9oaU00ZVdhd2RBaUVBN2RVWkVieGIyZDlCd0g0cE4zNm1sZHY1V2dRVXhGaXZPSERaNjhRQgoyYkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU9UYmRPczlXV2Z4UUw1OXZKSExoSnFDcnpmVGpOQkpUclhlSkFSYlZXNUNvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMDhFMDFCTFBtNkp1djFIMEVvRnEzTkcvQkxpUWFuUDhyemlxL1piUFVkYUhiaW1hY216dAorajNOc3oyWG91dnd6TWt4aGpsUHl4LzM3Q1l6QVlLVmNnPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= - + # # The container image used to create the etcd service. # image: gcr.io/etcd-development/etcd:v3.5.13 @@ -517,7 +517,7 @@ cluster: # metadata: # name: ci - + # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). # # Decryption secret example (do not use in production!). diff --git a/docs/hack/pod-pvc-test.yaml b/docs/hack/pod-pvc-test.yaml index 58b3ac1..2c628c4 100644 --- a/docs/hack/pod-pvc-test.yaml +++ b/docs/hack/pod-pvc-test.yaml @@ -11,21 +11,25 @@ spec: storage: 1Gi storageClassName: ssd volumeMode: Filesystem - volumeName: nginx-pv --- apiVersion: v1 kind: Pod metadata: name: nginx-pod - annotations: - kubernetes.io/psp: restricted - seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' spec: + securityContext: + seccompProfile: + type: RuntimeDefault containers: - name: nginx-container image: nginx + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault resources: limits: cpu: "0.5" diff --git a/docs/hack/ubuntu-br0-persistent.yaml b/docs/hack/ubuntu-br0-persistent.yaml index 6171e12..fba9a12 100644 --- a/docs/hack/ubuntu-br0-persistent.yaml +++ b/docs/hack/ubuntu-br0-persistent.yaml @@ -9,48 +9,26 @@ spec: registry: url: "docker://docker.io/containercraft/ubuntu:22.04" pvc: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 256G storageClassName: "ssd" volumeMode: Filesystem + resources: + requests: + storage: 64G + accessModes: + - ReadWriteOnce --- -# ssh-keygen: kubectl create secret generic user-kc2-sshpubkey --from-file=key1=.ssh/id_rsa.pub --dry-run=client -oyaml | kubectl apply -f - apiVersion: kubevirt.io/v1 kind: VirtualMachine metadata: - name: ubuntu + name: ubuntu-br0-persistent namespace: default labels: - app: kc2 + app: ubuntu + feature: xrdp spec: running: true template: spec: - nodeSelector: - kubernetes.io/hostname: "mordor" - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - "mordor" - #nodeSelector: - # kubernetes.io/hostname: "op1" - #affinity: - # nodeAffinity: - # 
requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/hostname - # operator: In - # values: - # - "op1" domain: clock: utc: {} @@ -68,7 +46,12 @@ spec: model: host-passthrough dedicatedCpuPlacement: false memory: - guest: 8192M + guest: 8G + machine: + type: q35 + resources: + requests: + memory: 8G devices: rng: {} autoattachPodInterface: false @@ -95,17 +78,9 @@ spec: bootloader: efi: secureBoot: true - machine: - type: q35 - resources: - requests: - memory: 8192M hostname: ccio networks: - name: enp1s0 - multus: - networkName: lan - - name: enp2s0 multus: networkName: br0 terminationGracePeriodSeconds: 0 @@ -113,7 +88,7 @@ spec: - sshPublicKey: source: secret: - secretName: user-kc2-sshpubkey + secretName: kc2-pubkey propagationMethod: qemuGuestAgent: users: diff --git a/docs/hack/ubuntu-br0.yaml b/docs/hack/ubuntu-br0.yaml index ad8a41b..066765c 100644 --- a/docs/hack/ubuntu-br0.yaml +++ b/docs/hack/ubuntu-br0.yaml @@ -1,7 +1,7 @@ apiVersion: kubevirt.io/v1 kind: VirtualMachine metadata: - name: ubuntu + name: ubuntu-br0 namespace: default labels: app: kc2 @@ -68,7 +68,7 @@ spec: - sshPublicKey: source: secret: - secretName: user-kc2-sshpubkey + secretName: kc2-pubkey propagationMethod: qemuGuestAgent: users: diff --git a/docs/hack/wipe.yaml b/docs/hack/wipe.yaml index 5d8e05a..b8444d3 100644 --- a/docs/hack/wipe.yaml +++ b/docs/hack/wipe.yaml @@ -2,38 +2,43 @@ apiVersion: v1 kind: Pod metadata: - name: disk-erase-pod-cp1 + name: disk-erase-pod-op1 labels: app: disk-erase spec: - # Uncomment and specify the specific node you want to debug - nodeName: cp1 + nodeName: talos-3l6-kgu containers: - - image: ghcr.io/containercraft/konductor - command: ["/bin/sh"] - args: - - "-c" - - > - sleep 10; # Wait a bit for system to be ready - # Replace /dev/nvmeXn1 with your nvme device identifier - sfdisk --delete /dev/sda; - wipefs -a /dev/sda; - # Your additional commands here - imagePullPolicy: IfNotPresent - name: disk-erase-container - securityContext: - capabilities: - add: ["NET_ADMIN", "SYS_ADMIN"] - runAsUser: 0 # run as root (or any other user) - volumeMounts: - - name: dev - mountPath: /dev - - name: sys - mountPath: /sys - restartPolicy: Never # we want to be intentional about running this pod - hostIPC: true # Use the host's ipc namespace - hostNetwork: true # Use the host's network namespace - hostPID: true # Use the host's pid namespace + - image: ghcr.io/containercraft/konductor + name: disk-erase-container + command: ["/bin/bash", "-c", "sleep infinity"] + #command: [ + # "/bin/bash", + # "-c", + # "sudo apt update && sudo apt install -y fdisk && sudo fdisk --delete /mnt/root/dev/sda; sudo wipefs --all /mnt/root/dev/sda; sync; echo 'done'" + #] + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN", "NET_ADMIN"] + runAsUser: 0 + volumeMounts: + - name: dev + mountPath: /mnt/root/dev + - name: sys + mountPath: /mnt/root/sys + - name: proc + mountPath: /mnt/root/proc + - name: root + mountPath: /mnt/root + resources: + limits: + cpu: "1" + memory: "1Gi" + hostPID: true + hostIPC: true + hostNetwork: true + restartPolicy: Never volumes: - name: dev hostPath: @@ -41,43 +46,54 @@ spec: - name: sys hostPath: path: /sys - + - name: proc + hostPath: + path: /proc + - name: root + hostPath: + path: / + type: Directory --- apiVersion: v1 kind: Pod metadata: - name: disk-erase-pod-cp2 + name: disk-erase-pod-op2 labels: app: disk-erase spec: - # Uncomment and specify the 
specific node you want to debug - nodeName: cp2 + nodeName: talos-5um-rqo containers: - - image: ghcr.io/containercraft/konductor - command: ["/bin/sh"] - args: - - "-c" - - > - sleep 10; # Wait a bit for system to be ready - # Replace /dev/nvmeXn1 with your nvme device identifier - sfdisk --delete /dev/sda; - wipefs -a /dev/sda; - # Your additional commands here - imagePullPolicy: IfNotPresent - name: disk-erase-container - securityContext: - capabilities: - add: ["NET_ADMIN", "SYS_ADMIN"] - runAsUser: 0 # run as root (or any other user) - volumeMounts: - - name: dev - mountPath: /dev - - name: sys - mountPath: /sys - restartPolicy: Never # we want to be intentional about running this pod - hostIPC: true # Use the host's ipc namespace - hostNetwork: true # Use the host's network namespace - hostPID: true # Use the host's pid namespace + - image: ghcr.io/containercraft/konductor + name: disk-erase-container + command: ["/bin/bash", "-c", "sleep infinity"] + #command: [ + # "/bin/bash", + # "-c", + # "sudo apt update && sudo apt install -y fdisk && sudo fdisk --delete /mnt/root/dev/sda; sudo wipefs --all /mnt/root/dev/sda; sync; echo 'done'" + #] + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN", "NET_ADMIN"] + runAsUser: 0 + volumeMounts: + - name: dev + mountPath: /mnt/root/dev + - name: sys + mountPath: /mnt/root/sys + - name: proc + mountPath: /mnt/root/proc + - name: root + mountPath: /mnt/root + resources: + limits: + cpu: "1" + memory: "1Gi" + hostPID: true + hostIPC: true + hostNetwork: true + restartPolicy: Never volumes: - name: dev hostPath: @@ -85,43 +101,54 @@ spec: - name: sys hostPath: path: /sys - + - name: proc + hostPath: + path: /proc + - name: root + hostPath: + path: / + type: Directory --- apiVersion: v1 kind: Pod metadata: - name: disk-erase-pod-cp3 + name: disk-erase-pod-op3 labels: app: disk-erase spec: - # Uncomment and specify the specific node you want to debug - nodeName: cp3 + nodeName: talos-s1t-lsd containers: - - image: ghcr.io/containercraft/konductor - command: ["/bin/sh"] - args: - - "-c" - - > - sleep 10; # Wait a bit for system to be ready - # Replace /dev/nvmeXn1 with your nvme device identifier - sfdisk --delete /dev/sda; - wipefs -a /dev/sda; - # Your additional commands here - imagePullPolicy: IfNotPresent - name: disk-erase-container - securityContext: - capabilities: - add: ["NET_ADMIN", "SYS_ADMIN"] - runAsUser: 0 # run as root (or any other user) - volumeMounts: - - name: dev - mountPath: /dev - - name: sys - mountPath: /sys - restartPolicy: Never # we want to be intentional about running this pod - hostIPC: true # Use the host's ipc namespace - hostNetwork: true # Use the host's network namespace - hostPID: true # Use the host's pid namespace + - image: ghcr.io/containercraft/konductor + name: disk-erase-container + command: ["/bin/bash", "-c", "sleep infinity"] + #command: [ + # "/bin/bash", + # "-c", + # "sudo apt update && sudo apt install -y fdisk && sudo fdisk --delete /mnt/root/dev/sda; sudo wipefs --all /mnt/root/dev/sda; sync; echo 'done'" + #] + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN", "NET_ADMIN"] + runAsUser: 0 + volumeMounts: + - name: dev + mountPath: /mnt/root/dev + - name: sys + mountPath: /mnt/root/sys + - name: proc + mountPath: /mnt/root/proc + - name: root + mountPath: /mnt/root + resources: + limits: + cpu: "1" + memory: "1Gi" + hostPID: true + hostIPC: true + hostNetwork: true + restartPolicy: Never 
volumes: - name: dev hostPath: @@ -129,4 +156,10 @@ spec: - name: sys hostPath: path: /sys - + - name: proc + hostPath: + path: /proc + - name: root + hostPath: + path: / + type: Directory diff --git a/docs/metal/optiplexprime/cluster.yaml b/docs/metal/optiplexprime/cluster.yaml index 1d02e49..d610caf 100644 --- a/docs/metal/optiplexprime/cluster.yaml +++ b/docs/metal/optiplexprime/cluster.yaml @@ -18,18 +18,18 @@ name: 4c4c4544-0036-3110-8047-c2c04f394e32 install: disk: /dev/nvme1n1 patches: - - file: docs/metal/optiplexprime/op1.yaml + - file: docs/metal/optiplexprime/machine.yaml --- kind: Machine name: 4c4c4544-0035-5210-804b-c2c04f394e32 install: disk: /dev/nvme1n1 patches: - - file: docs/metal/optiplexprime/op2.yaml + - file: docs/metal/optiplexprime/machine.yaml --- kind: Machine name: 4c4c4544-0056-3210-804c-b5c04f525032 install: disk: /dev/nvme1n1 patches: - - file: docs/metal/optiplexprime/op3.yaml + - file: docs/metal/optiplexprime/machine.yaml diff --git a/docs/metal/optiplexprime/op1.yaml b/docs/metal/optiplexprime/machine.yaml similarity index 50% rename from docs/metal/optiplexprime/op1.yaml rename to docs/metal/optiplexprime/machine.yaml index 5a43344..d149d4f 100644 --- a/docs/metal/optiplexprime/op1.yaml +++ b/docs/metal/optiplexprime/machine.yaml @@ -1,6 +1,15 @@ -version: v1alpha1 -debug: false machine: + network: + interfaces: + - interface: br0 + dhcp: true + bridge: + interfaces: + - enp3s0 + disks: + - device: /dev/sda + partitions: + - mountpoint: /var/mnt/hostpath-provisioner kubelet: extraMounts: - destination: /etc/cni/net.d @@ -17,36 +26,3 @@ machine: - bind - rshared - rw - disks: - - device: /dev/sdb - partitions: - - mountpoint: /var/mnt/hostpath-provisioner - time: - disabled: true - network: - hostname: op1 - nameservers: - - 192.168.1.1 - extraHostEntries: - - ip: 192.168.1.41 - aliases: - - op1 - interfaces: - - interface: br0 - mtu: 1500 - addresses: - - 192.168.1.41/24 - routes: - - network: 0.0.0.0/0 - gateway: 192.168.1.1 - metric: 1024 - bridge: - stp: - enabled: true - interfaces: - - enp3s0 - install: - wipe: true - extraKernelArgs: - - talos.platform=metal - - reboot=k diff --git a/docs/metal/optiplexprime/op2.yaml b/docs/metal/optiplexprime/op2.yaml deleted file mode 100644 index 6043a88..0000000 --- a/docs/metal/optiplexprime/op2.yaml +++ /dev/null @@ -1,52 +0,0 @@ -version: v1alpha1 -debug: false -machine: - kubelet: - extraMounts: - - destination: /etc/cni/net.d - type: bind - source: /etc/cni/net.d - options: - - bind - - rshared - - rw - - destination: /opt/cni/bin - type: bind - source: /opt/cni/bin - options: - - bind - - rshared - - rw - disks: - - device: /dev/sdb - partitions: - - mountpoint: /var/mnt/hostpath-provisioner - time: - disabled: true - network: - hostname: op2 - nameservers: - - 192.168.1.1 - extraHostEntries: - - ip: 192.168.1.42 - aliases: - - op2 - interfaces: - - interface: br0 - mtu: 1500 - addresses: - - 192.168.1.42/24 - routes: - - network: 0.0.0.0/0 - gateway: 192.168.1.1 - metric: 1024 - bridge: - stp: - enabled: true - interfaces: - - enp3s0 - install: - wipe: true - extraKernelArgs: - - talos.platform=metal - - reboot=k diff --git a/docs/metal/optiplexprime/op3.yaml b/docs/metal/optiplexprime/op3.yaml deleted file mode 100644 index cf72cd8..0000000 --- a/docs/metal/optiplexprime/op3.yaml +++ /dev/null @@ -1,52 +0,0 @@ -version: v1alpha1 -debug: false -machine: - kubelet: - extraMounts: - - destination: /etc/cni/net.d - type: bind - source: /etc/cni/net.d - options: - - bind - - rshared - - rw - - destination: 
/opt/cni/bin - type: bind - source: /opt/cni/bin - options: - - bind - - rshared - - rw - disks: - - device: /dev/sdb - partitions: - - mountpoint: /var/mnt/hostpath-provisioner - time: - disabled: true - network: - hostname: op3 - nameservers: - - 192.168.1.1 - extraHostEntries: - - ip: 192.168.1.43 - aliases: - - op3 - interfaces: - - interface: br0 - mtu: 1500 - addresses: - - 192.168.1.43/24 - routes: - - network: 0.0.0.0/0 - gateway: 192.168.1.1 - metric: 1024 - bridge: - stp: - enabled: true - interfaces: - - enp3s0 - install: - wipe: true - extraKernelArgs: - - talos.platform=metal - - reboot=k diff --git a/docs/metal/optiplexprime/patch.yaml b/docs/metal/optiplexprime/patch.yaml index 6430d9b..cee58e8 100644 --- a/docs/metal/optiplexprime/patch.yaml +++ b/docs/metal/optiplexprime/patch.yaml @@ -1,30 +1,4 @@ cluster: - allowSchedulingOnControlPlanes: true - apiServer: - # Disable PodSecurityPolicy in the API server and default manifests. - disablePodSecurityPolicy: true - # Configure the API server admission plugins. - admissionControl: - - name: PodSecurity - configuration: - apiVersion: pod-security.admission.config.k8s.io/v1alpha1 - kind: PodSecurityConfiguration - defaults: - audit: restricted - audit-version: latest - enforce: baseline - enforce-version: latest - warn: restricted - warn-version: latest - exemptions: - namespaces: - - kube-system - - hostpath-provisioner - - cdi - runtimeClasses: [] - usernames: [] - auditPolicy: - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: Metadata + allowSchedulingOnControlPlanes: true + apiServer: + disablePodSecurityPolicy: true diff --git a/pulumi/src/hostpath_provisioner/deploy.py b/pulumi/src/hostpath_provisioner/deploy.py index d71ed81..3b27c0d 100644 --- a/pulumi/src/hostpath_provisioner/deploy.py +++ b/pulumi/src/hostpath_provisioner/deploy.py @@ -33,6 +33,7 @@ def deploy( ns_labels = { "kubevirt.io": "", "kubernetes.io/metadata.name": ns_name, + "pod-security.kubernetes.io/enforce": "privileged" } namespace = create_namespace( depends,
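Note on the final hunk: it labels the hostpath-provisioner namespace with pod-security.kubernetes.io/enforce: privileged. This pairs with the patch.yaml simplification in the same commit — once disablePodSecurityPolicy stands alone and the old PodSecurity admission configuration (with its namespace exemptions for kube-system, hostpath-provisioner, and cdi) is removed, the namespace-level Pod Security Admission label is what still lets the provisioner's hostPath-mounting pods run. A minimal sketch of the resulting namespace as a standalone pulumi_kubernetes resource, assuming the upstream pulumi_kubernetes SDK and a namespace name of "hostpath-provisioner" (the repo's create_namespace helper is not shown in this diff, so the resource below is illustrative, not the repo's actual code path):

    import pulumi_kubernetes as k8s

    # Namespace labeled so Pod Security Admission admits the provisioner's
    # privileged, hostPath-mounting pods now that PSP is disabled.
    namespace = k8s.core.v1.Namespace(
        "hostpath-provisioner",
        metadata=k8s.meta.v1.ObjectMetaArgs(
            name="hostpath-provisioner",
            labels={
                "kubevirt.io": "",
                "kubernetes.io/metadata.name": "hostpath-provisioner",
                "pod-security.kubernetes.io/enforce": "privileged",
            },
        ),
    )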
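Note on the rewritten wipe.yaml pods earlier in the patch: the destructive one-liner is now commented out in favor of sleep infinity, so the wipe is meant to be run interactively against the host root bind-mounted at /mnt/root. Before anyone uncomments that command, one correction is worth making: fdisk has no --delete flag — the pre-change pods used sfdisk --delete, which is the tool that supports it (on Ubuntu-based images the commented apt install -y fdisk pulls in sfdisk as well, and wipefs ships with util-linux). An interactive run would look roughly like this, assuming those tools are present in the konductor image:

    kubectl exec -it disk-erase-pod-op1 -- bash -c \
      'sfdisk --delete /mnt/root/dev/sda; wipefs --all /mnt/root/dev/sda; sync'

The pods already run as root (runAsUser: 0, privileged: true), so no sudo is needed inside the container.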