diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixmachineconfigs.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixmachineconfigs.yaml index 65ac440b26c8..113b00ef288e 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_nutanixmachineconfigs.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_nutanixmachineconfigs.yaml @@ -74,6 +74,28 @@ spec: required: - type type: object + gpus: + description: List of GPU devices that should be added to the VMs. + items: + description: NutanixGPUIdentifier holds VM GPU device configuration. + properties: + deviceID: + description: deviceID is the device ID of the GPU device. + format: int64 + type: integer + name: + description: name is the name of the GPU device. + type: string + type: + description: type is the type of the GPU device. + enum: + - deviceID + - name + type: string + required: + - type + type: object + type: array image: description: image is to identify the OS image uploaded to the Prism Central (PC) The image identifier (uuid or name) can be obtained diff --git a/pkg/providers/nutanix/config/md-template.yaml b/pkg/providers/nutanix/config/md-template.yaml index 0a204924a4f9..90e7b91c33c2 100644 --- a/pkg/providers/nutanix/config/md-template.yaml +++ b/pkg/providers/nutanix/config/md-template.yaml @@ -1,4 +1,4 @@ -{{- if .failureDomains }}{{- range $index, $fd := .failureDomains }} +{{- if $.failureDomains -}}{{ range $fd := $.failureDomains -}} apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: @@ -99,7 +99,8 @@ spec: {{- end }} {{- end }} --- -{{- end }}{{- else }} +{{ end -}} +{{- else -}} apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: @@ -209,7 +210,7 @@ spec: {{- end }} {{- end }} --- -{{- end }} +{{ end -}} apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate metadata: diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index eaf3936df2bf..eafeaeb4d291 100644 ---
a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -729,29 +729,54 @@ func TestTemplateBuilderFailureDomains(t *testing.T) { func TestTemplateBuilderWorkersFailureDomains(t *testing.T) { for _, tc := range []struct { - Input string - OutputCP string - OutputMD string + Input string + OutputCP string + OutputMD string + workloadTemplateNames map[string]string + kubeadmconfigTemplateNames map[string]string }{ { Input: "testdata/eksa-cluster-worker-fds.yaml", OutputCP: "testdata/expected_results_worker_fds.yaml", OutputMD: "testdata/expected_results_worker_fds_md.yaml", + workloadTemplateNames: map[string]string{ + "eksa-unit-test": "eksa-unit-test", + }, + kubeadmconfigTemplateNames: map[string]string{ + "eksa-unit-test": "eksa-unit-test", + }, + }, + { + Input: "testdata/eksa-cluster-multi-worker-fds.yaml", + OutputCP: "testdata/expected_results_multi_worker_fds.yaml", + OutputMD: "testdata/expected_results_multi_worker_fds_md.yaml", + workloadTemplateNames: map[string]string{ + "eksa-unit-test-1": "eksa-unit-test-1", + "eksa-unit-test-2": "eksa-unit-test-2", + "eksa-unit-test-3": "eksa-unit-test-3", + }, + kubeadmconfigTemplateNames: map[string]string{ + "eksa-unit-test-1": "eksa-unit-test", + "eksa-unit-test-2": "eksa-unit-test", + "eksa-unit-test-3": "eksa-unit-test", + }, }, } { clusterSpec := test.NewFullClusterSpec(t, tc.Input) machineConf := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) - workerConfs := make(map[string]anywherev1.NutanixMachineConfigSpec) + workerConfSpecs := make(map[string]anywherev1.NutanixMachineConfigSpec) for _, worker := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { workerConf := clusterSpec.NutanixMachineConfig(worker.MachineGroupRef.Name) - workerConfs[worker.MachineGroupRef.Name] = workerConf.Spec + workerConfSpecs[worker.MachineGroupRef.Name] = workerConf.Spec } dcConf := clusterSpec.NutanixDatacenter + // 
workerMachineConfigs := clusterSpec.NutanixMachineConfigs + t.Setenv(constants.EksaNutanixUsernameKey, "admin") t.Setenv(constants.EksaNutanixPasswordKey, "password") creds := GetCredsFromEnv() - builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now) + builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfSpecs, creds, time.Now) assert.NotNil(t, builder) buildSpec := test.NewFullClusterSpec(t, tc.Input) @@ -761,13 +786,16 @@ func TestTemplateBuilderWorkersFailureDomains(t *testing.T) { assert.NotNil(t, cpSpec) test.AssertContentToFile(t, string(cpSpec), tc.OutputCP) - workloadTemplateNames := map[string]string{ - "eksa-unit-test": "eksa-unit-test", - } - kubeadmconfigTemplateNames := map[string]string{ - "eksa-unit-test": "eksa-unit-test", - } - workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames) + // workloadTemplateNames, kubeadmconfigTemplateNames := getTemplateNames(clusterSpec, builder, workerMachineConfigs) + // workloadTemplateNames := map[string]string{ + // "eksa-unit-test-1": "eksa-unit-test-1", + // "eksa-unit-test-2": "eksa-unit-test-2", + // "eksa-unit-test-3": "eksa-unit-test-3", + // } + // kubeadmconfigTemplateNames := map[string]string{ + // "eksa-unit-test": "eksa-unit-test", + // } + workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, tc.workloadTemplateNames, tc.kubeadmconfigTemplateNames) assert.NoError(t, err) assert.NotNil(t, workerSpec) test.AssertContentToFile(t, string(workerSpec), tc.OutputMD) diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-multi-worker-fds.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-multi-worker-fds.yaml new file mode 100644 index 000000000000..70034a827cb4 --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-multi-worker-fds.yaml @@ -0,0 +1,177 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster 
+metadata: + name: eksa-unit-test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: test-ip + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + workerNodeGroupConfigurations: + - count: 4 + name: eksa-unit-test-1 + machineGroupRef: + name: eksa-unit-test-1 + kind: NutanixMachineConfig + - count: 3 + name: eksa-unit-test-2 + machineGroupRef: + name: eksa-unit-test-2 + kind: NutanixMachineConfig + - count: 2 + name: eksa-unit-test-3 + machineGroupRef: + name: eksa-unit-test-3 + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + failureDomains: + - name: "pe1" + workerMachineGroups: + - eksa-unit-test-1 + - eksa-unit-test-2 + - eksa-unit-test-3 + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + workerMachineGroups: + - eksa-unit-test-1 + - eksa-unit-test-2 + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- +apiVersion: 
anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test-1 + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test-2 + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 2 + memorySize: 16Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test-3 + namespace: default +spec: + vcpusPerSocket: 2 + vcpuSockets: 4 + memorySize: 4Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- diff --git a/pkg/providers/nutanix/testdata/expected_results_additional_categories_md.yaml b/pkg/providers/nutanix/testdata/expected_results_additional_categories_md.yaml index 1d48b33b65fc..f9a674c5d435 100644 --- a/pkg/providers/nutanix/testdata/expected_results_additional_categories_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_additional_categories_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml 
b/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml index 84003ac66d80..36350779f950 100644 --- a/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_autoscaling_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd_md.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd_md.yaml index df8b0fb12e3f..d44dd6e0929b 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional_md.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional_md.yaml index f56bd184057e..09a5182bee5b 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds.yaml b/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds.yaml new file mode 100644 index 000000000000..68739c610f69 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds.yaml @@ -0,0 +1,627 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + failureDomains: + - name: "pe1" + cluster: + type: "name" + name: "prism-cluster" + subnets: + - type: "uuid" + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + controlPlane: true + - name: "pe2" + cluster: + type: "uuid" + uuid: 
"4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: "name" + name: "prism-subnet" + controlPlane: true + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-eksa-unit-test" + kind: Secret + controlPlaneEndpoint: + host: "test-ip" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "eksa-unit-test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + replicas: 3 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: 
"true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "test-ip" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. 
+ - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. 
+ - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eksa-unit-test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + 
namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + } + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: 
+ metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: eksa-unit-test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + resources: + - kind: ConfigMap + name: eksa-unit-test-nutanix-ccm + - kind: Secret + name: eksa-unit-test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
"eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds_md.yaml b/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds_md.yaml new file mode 100644 index 000000000000..868e4d1a4ce1 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds_md.yaml @@ -0,0 +1,356 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test-eksa-unit-test-1-pe1" + namespace: "eksa-system" +spec: + clusterName: "eksa-unit-test" + replicas: 2 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + spec: + failureDomain: "pe1" + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "eksa-unit-test" + clusterName: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "eksa-unit-test-1-pe1" + version: "v1.19.8-eks-1-19-4" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "eksa-unit-test-1-pe1" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" +--- +apiVersion: 
cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test-eksa-unit-test-1-pe2" + namespace: "eksa-system" +spec: + clusterName: "eksa-unit-test" + replicas: 2 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + spec: + failureDomain: "pe2" + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "eksa-unit-test" + clusterName: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "eksa-unit-test-1-pe2" + version: "v1.19.8-eks-1-19-4" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "eksa-unit-test-1-pe2" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + template: + spec: + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) 
NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test-eksa-unit-test-2-pe1" + namespace: "eksa-system" +spec: + clusterName: "eksa-unit-test" + replicas: 2 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + spec: + failureDomain: "pe1" + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "eksa-unit-test" + clusterName: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "eksa-unit-test-2-pe1" + version: "v1.19.8-eks-1-19-4" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "eksa-unit-test-2-pe1" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 2 + memorySize: 16Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test-eksa-unit-test-2-pe2" + namespace: "eksa-system" +spec: + clusterName: "eksa-unit-test" + replicas: 1 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + spec: + failureDomain: "pe2" + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "eksa-unit-test" + clusterName: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "eksa-unit-test-2-pe2" + version: 
"v1.19.8-eks-1-19-4" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "eksa-unit-test-2-pe2" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 2 + memorySize: 16Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + template: + spec: + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test-eksa-unit-test-3-pe1" + namespace: "eksa-system" +spec: + clusterName: "eksa-unit-test" + replicas: 2 + selector: + matchLabels: {} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + spec: + failureDomain: "pe1" + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "eksa-unit-test" + clusterName: "eksa-unit-test" + infrastructureRef: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "eksa-unit-test-3-pe1" + version: "v1.19.8-eks-1-19-4" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "eksa-unit-test-3-pe1" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 2 + vcpuSockets: 4 + memorySize: 4Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + template: + spec: + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + +--- diff --git a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml index 1b72133af2db..217297ef95fc 100644 --- a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git 
a/pkg/providers/nutanix/testdata/expected_results_project_md.yaml b/pkg/providers/nutanix/testdata/expected_results_project_md.yaml index 96820e6828eb..c0228bea4f83 100644 --- a/pkg/providers/nutanix/testdata/expected_results_project_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_project_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml b/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml index 75f5c3cf17fe..167041328fe5 100644 --- a/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_registry_mirror_md.yaml b/pkg/providers/nutanix/testdata/expected_results_registry_mirror_md.yaml index 6c3ee4ac92a9..f5739c4dbba6 100644 --- a/pkg/providers/nutanix/testdata/expected_results_registry_mirror_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_registry_mirror_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_results_worker_fds_md.yaml b/pkg/providers/nutanix/testdata/expected_results_worker_fds_md.yaml index c527f420f388..d2caf0f9ec5d 100644 --- a/pkg/providers/nutanix/testdata/expected_results_worker_fds_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_worker_fds_md.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: diff --git a/pkg/providers/nutanix/testdata/expected_wn.yaml b/pkg/providers/nutanix/testdata/expected_wn.yaml index a4d5b385dfe5..2419f3bd1cf0 100644 --- a/pkg/providers/nutanix/testdata/expected_wn.yaml +++ b/pkg/providers/nutanix/testdata/expected_wn.yaml @@ -1,4 +1,3 @@ - apiVersion: cluster.x-k8s.io/v1beta1 kind: 
MachineDeployment metadata: diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go index 63228db8bd34..4723f3ee46f4 100644 --- a/pkg/providers/nutanix/validator.go +++ b/pkg/providers/nutanix/validator.go @@ -147,6 +147,7 @@ func (v *Validator) validateFailureDomains(ctx context.Context, client Client, s return err } + failureDomainCount := len(config.Spec.FailureDomains) for _, fd := range config.Spec.FailureDomains { if res := regexName.MatchString(fd.Name); !res { errorStr := `failure domain name should contains only small letters, digits, and hyphens. @@ -164,8 +165,9 @@ func (v *Validator) validateFailureDomains(ctx context.Context, client Client, s } } + workerMachineGroups := getWorkerMachineGroups(spec) for _, workerMachineGroupName := range fd.WorkerMachineGroups { - if err := v.validateWorkerMachineGroup(spec, workerMachineGroupName); err != nil { + if err := v.validateWorkerMachineGroup(workerMachineGroups, workerMachineGroupName, failureDomainCount); err != nil { return err } } @@ -174,13 +176,15 @@ func (v *Validator) validateFailureDomains(ctx context.Context, client Client, s return nil } -func (v *Validator) validateWorkerMachineGroup(spec *cluster.Spec, workerMachineGroupName string) error { - workerMachineGroupNames := getWorkerMachineGroupNames(spec) - - if !sliceContains(workerMachineGroupNames, workerMachineGroupName) { +func (v *Validator) validateWorkerMachineGroup(workerMachineGroups map[string]anywherev1.WorkerNodeGroupConfiguration, workerMachineGroupName string, fdCount int) error { + if _, ok := workerMachineGroups[workerMachineGroupName]; !ok { return fmt.Errorf("worker machine group %s not found in the cluster worker node group definitions", workerMachineGroupName) } + if workerMachineGroups[workerMachineGroupName].Count != nil && *workerMachineGroups[workerMachineGroupName].Count > fdCount { + return fmt.Errorf("count %d of machines in workerNodeGroupConfiguration %s shouldn't be greater than the failure 
domain count %d where those machines should be spread across", *workerMachineGroups[workerMachineGroupName].Count, workerMachineGroupName, fdCount) + } + + return nil } @@ -711,11 +715,11 @@ func (v *Validator) validateFreeGPU(ctx context.Context, v3Client Client, cluste func (v *Validator) validateUpgradeRolloutStrategy(clusterSpec *cluster.Spec) error { if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil { - return fmt.Errorf("Upgrade rollout strategy customization is not supported for nutanix provider") + return fmt.Errorf("upgrade rollout strategy customization is not supported for nutanix provider") } for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations { if workerNodeGroupConfiguration.UpgradeRolloutStrategy != nil { - return fmt.Errorf("Upgrade rollout strategy customization is not supported for nutanix provider") + return fmt.Errorf("upgrade rollout strategy customization is not supported for nutanix provider") } } return nil } @@ -755,12 +759,12 @@ func findSubnetUUIDByName(ctx context.Context, v3Client Client, clusterUUID, sub return res.Entities[0].Metadata.UUID, nil } -// getWorkerMachineGroupNames retrieves the worker machine group names from the cluster spec. -func getWorkerMachineGroupNames(spec *cluster.Spec) []string { - result := make([]string, 0) +// getWorkerMachineGroups retrieves the worker machine groups from the cluster spec, keyed by machine group ref name.
+func getWorkerMachineGroups(spec *cluster.Spec) map[string]anywherev1.WorkerNodeGroupConfiguration { + result := make(map[string]anywherev1.WorkerNodeGroupConfiguration) for _, workerNodeGroupConf := range spec.Cluster.Spec.WorkerNodeGroupConfigurations { - result = append(result, workerNodeGroupConf.Name) + result[workerNodeGroupConf.MachineGroupRef.Name] = workerNodeGroupConf } return result @@ -855,15 +859,6 @@ func findProjectUUIDByName(ctx context.Context, v3Client Client, projectName str return res.Entities[0].Metadata.UUID, nil } -func sliceContains(slice []string, element string) bool { - for _, sliceElement := range slice { - if sliceElement == element { - return true - } - } - return false -} - func isRequestedGPUAssignable(gpu v3.GPU, requestedGpu anywherev1.NutanixGPUIdentifier) bool { if requestedGpu.Type == anywherev1.NutanixGPUIdentifierDeviceID { return (*gpu.DeviceID == *requestedGpu.DeviceID) && gpu.Assignable diff --git a/pkg/providers/nutanix/validator_test.go b/pkg/providers/nutanix/validator_test.go index e3d6b89e4197..be2606fc9561 100644 --- a/pkg/providers/nutanix/validator_test.go +++ b/pkg/providers/nutanix/validator_test.go @@ -222,40 +222,6 @@ func fakeProjectList() *v3.ProjectListResponse { } } -func TestSliceContainsFunc(t *testing.T) { - tests := []struct { - name string - slice []string - value string - expected bool - }{ - { - name: "empty slice", - slice: []string{}, - value: "test", - expected: false, - }, - { - name: "slice contains value", - slice: []string{"test", "test1", "test2"}, - value: "test", - expected: true, - }, - { - name: "slice does not contain value", - slice: []string{"test", "test2", "test3"}, - value: "test1", - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, sliceContains(tt.slice, tt.value)) - }) - } -} - func TestNutanixValidatorValidateMachineConfig(t *testing.T) { ctrl := gomock.NewController(t) @@ -1793,3 +1759,44 @@ func 
TestValidateClusterMachineConfigsSuccess(t *testing.T) { t.Fatalf("validation should pass: %v", err) } } + +func TestValidateMachineConfigFailureDomainsWrongCount(t *testing.T) { + ctx := context.Background() + clusterConfigFile := "testdata/eksa-cluster-multi-worker-fds.yaml" + clusterSpec := test.NewFullClusterSpec(t, clusterConfigFile) + + ctrl := gomock.NewController(t) + mockClient := mocknutanix.NewMockClient(ctrl) + mockClient.EXPECT().GetCurrentLoggedInUser(gomock.Any()).Return(&v3.UserIntentResponse{}, nil).AnyTimes() + mockClient.EXPECT().ListCluster(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.ClusterListIntentResponse, error) { + return fakeClusterListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().ListSubnet(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, filters *v3.DSMetadata) (*v3.SubnetListIntentResponse, error) { + return fakeSubnetListForDCTest(filters.Filter) + }, + ).AnyTimes() + mockClient.EXPECT().GetSubnet(gomock.Any(), gomock.Eq("2d166190-7759-4dc6-b835-923262d6b497")).Return(nil, nil).AnyTimes() + mockClient.EXPECT().GetCluster(gomock.Any(), gomock.Eq("4d69ca7d-022f-49d1-a454-74535993bda4")).Return(nil, nil).AnyTimes() + + mockTLSValidator := mockCrypto.NewMockTlsValidator(ctrl) + mockTLSValidator.EXPECT().ValidateCert(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + mockTransport := mocknutanix.NewMockRoundTripper(ctrl) + mockTransport.EXPECT().RoundTrip(gomock.Any()).Return(&http.Response{}, nil).AnyTimes() + + mockHTTPClient := &http.Client{Transport: mockTransport} + clientCache := &ClientCache{clients: map[string]Client{"test": mockClient}} + validator := NewValidator(clientCache, mockTLSValidator, mockHTTPClient) + + for i := 0; i < len(clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations); i++ { + clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[i].Count = utils.IntPtr(20) + } + + err := 
validator.validateFailureDomains(ctx, clientCache.clients["test"], clusterSpec) + if err == nil { + t.Fatalf("validation should not pass: %v", err) + } +}