diff --git a/Makefile b/Makefile index b9cd4775d7..a4cb08fc73 100644 --- a/Makefile +++ b/Makefile @@ -374,6 +374,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai cp "$(RELEASE_DIR)/main/cluster-template-ignition.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/base/cluster-template-ignition.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/base" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/hw-upgrade" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-hw-upgrade.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/multi-disk" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-multi-disk.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/storage-policy" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-storage-policy.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/conformance" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-conformance.yaml" # Since CAPI uses different flavor names for KCP and MD remediation using MHC diff --git a/pkg/services/govmomi/vcenter/clone_test.go b/pkg/services/govmomi/vcenter/clone_test.go index 292dc00b6a..5251c210e7 100644 --- a/pkg/services/govmomi/vcenter/clone_test.go +++ b/pkg/services/govmomi/vcenter/clone_test.go @@ -19,6 +19,7 @@ package vcenter import ( ctx "context" "crypto/tls" + "fmt" "testing" "github.com/vmware/govmomi/object" @@ -60,25 +61,30 @@ func TestGetDiskSpec(t *testing.T) { additionalCloneDiskSizes []int32 name string disks object.VirtualDeviceList + dataDisks []infrav1.VSphereDisk + expectedDiskCount int err string }{ { - name: "Successfully clone template with correct disk requirements", - disks: defaultDisks, - cloneDiskSize: defaultSizeGiB, - expectDevice: true, + 
name: "Successfully clone template with correct disk requirements", + disks: defaultDisks, + cloneDiskSize: defaultSizeGiB, + expectDevice: true, + expectedDiskCount: 1, }, { - name: "Successfully clone template and increase disk requirements", - disks: defaultDisks, - cloneDiskSize: defaultSizeGiB + 1, - expectDevice: true, + name: "Successfully clone template and increase disk requirements", + disks: defaultDisks, + cloneDiskSize: defaultSizeGiB + 1, + expectDevice: true, + expectedDiskCount: 1, }, { - name: "Successfully clone template with no explicit disk requirements", - disks: defaultDisks, - cloneDiskSize: 0, - expectDevice: true, + name: "Successfully clone template with no explicit disk requirements", + disks: defaultDisks, + cloneDiskSize: 0, + expectDevice: true, + expectedDiskCount: 1, }, { name: "Fail to clone template with lower disk requirements then on template", @@ -98,6 +104,7 @@ func TestGetDiskSpec(t *testing.T) { cloneDiskSize: defaultSizeGiB + 1, additionalCloneDiskSizes: []int32{defaultSizeGiB + 1}, expectDevice: true, + expectedDiskCount: 2, }, { name: "Fails to clone template and decrease second disk size", @@ -121,25 +128,187 @@ func TestGetDiskSpec(t *testing.T) { }, } vmContext := &capvcontext.VMContext{VSphereVM: vsphereVM} - devices, err := getDiskSpec(vmContext, tc.disks) + deviceResults, err := getDiskSpec(vmContext, tc.disks) if (tc.err != "" && err == nil) || (tc.err == "" && err != nil) || (err != nil && tc.err != err.Error()) { t.Fatalf("Expected to get '%v' error from getDiskSpec, got: '%v'", tc.err, err) } - if deviceFound := len(devices) != 0; tc.expectDevice != deviceFound { - t.Fatalf("Expected to get a device: %v, but got: '%#v'", tc.expectDevice, devices) + if deviceFound := len(deviceResults) != 0; tc.expectDevice != deviceFound { + t.Fatalf("Expected to get a device: %v, but got: '%#v'", tc.expectDevice, deviceResults) } if tc.expectDevice { - primaryDevice := devices[0] + primaryDevice := deviceResults[0] 
validateDiskSpec(t, primaryDevice, tc.cloneDiskSize) if len(tc.additionalCloneDiskSizes) != 0 { - secondaryDevice := devices[1] + secondaryDevice := deviceResults[1] validateDiskSpec(t, secondaryDevice, tc.additionalCloneDiskSizes[0]) } + + // Check number of disks present + if len(deviceResults) != tc.expectedDiskCount { + t.Fatalf("Expected device count to be %v, but found %v", tc.expectedDiskCount, len(deviceResults)) + } } }) } } +func TestAddingDataDisks(t *testing.T) { + model, session, server := initSimulator(t) + t.Cleanup(model.Remove) + t.Cleanup(server.Close) + vm := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine) + machine := object.NewVirtualMachine(session.Client.Client, vm.Reference()) + + deviceList, err := machine.Device(ctx.TODO()) + if err != nil { + t.Fatalf("Failed to obtain vm devices: %v", err) + } + + // Find primary disk and get controller + disks := deviceList.SelectByType((*types.VirtualDisk)(nil)) + primaryDisk := disks[0].(*types.VirtualDisk) + controller, ok := deviceList.FindByKey(primaryDisk.ControllerKey).(types.BaseVirtualController) + if !ok { + t.Fatalf("unable to get controller for test") + } + + testCases := []struct { + name string + devices object.VirtualDeviceList + controller types.BaseVirtualController + dataDisks []infrav1.VSphereDisk + expectedUnitNumber []int + err string + }{ + { + name: "Add data disk with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(1), + expectedUnitNumber: []int{1}, + }, + { + name: "Add data disk with 2 ova disk", + devices: createAdditionalDisks(deviceList, controller, 1), + controller: controller, + dataDisks: createDataDiskDefinitions(1), + expectedUnitNumber: []int{2}, + }, + { + name: "Add multiple data disk with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(2), + expectedUnitNumber: []int{1, 2}, + }, + { + name: "Add too many data disks with 1 ova disk", + devices: 
deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(30), + err: "all unit numbers are already in-use", + }, + { + name: "Add data disk with no ova disk", + devices: nil, + controller: nil, + dataDisks: createDataDiskDefinitions(1), + err: "Invalid disk count: 0", + }, + { + name: "Add even more data disks than free unit numbers with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(40), + err: "all unit numbers are already in-use", + }, + } + + for _, test := range testCases { + tc := test + t.Run(tc.name, func(t *testing.T) { + var funcError error + + // Create the data disks + newDisks, funcError := createDataDisks(ctx.TODO(), tc.dataDisks, tc.devices) + if (tc.err != "" && funcError == nil) || (tc.err == "" && funcError != nil) || (funcError != nil && tc.err != funcError.Error()) { + t.Fatalf("Expected to get '%v' error from assignUnitNumber, got: '%v'", tc.err, funcError) + } + + if tc.err == "" && funcError == nil { + // Check number of disks present + if len(newDisks) != len(tc.dataDisks) { + t.Fatalf("Expected device count to be %v, but found %v", len(tc.dataDisks), len(newDisks)) + } + + // Validate the configs of new data disks + for index, disk := range newDisks { + // Check disk size matches original request + vd := disk.GetVirtualDeviceConfigSpec().Device.(*types.VirtualDisk) + expectedSize := int64(tc.dataDisks[index].SizeGiB) * 1024 * 1024 + if vd.CapacityInKB != expectedSize { + t.Fatalf("Expected disk size (KB) %d to match %d", vd.CapacityInKB, expectedSize) + } + + // Check unit number + unitNumber := *disk.GetVirtualDeviceConfigSpec().Device.GetVirtualDevice().UnitNumber + if tc.err == "" && unitNumber != int32(tc.expectedUnitNumber[index]) { + t.Fatalf("Expected to get unitNumber '%d' error from assignUnitNumber, got: '%d'", tc.expectedUnitNumber[index], unitNumber) + } + } + } + }) + } +} + +func createAdditionalDisks(devices object.VirtualDeviceList, controller types.BaseVirtualController, 
numOfDisks int) object.VirtualDeviceList { + deviceList := devices + disks := devices.SelectByType((*types.VirtualDisk)(nil)) + primaryDisk := disks[0].(*types.VirtualDisk) + + for i := 0; i < numOfDisks; i++ { + newDevice := createVirtualDisk(primaryDisk.ControllerKey+1, controller, 10) + newUnitNumber := *primaryDisk.UnitNumber + int32(i+1) + newDevice.UnitNumber = &newUnitNumber + deviceList = append(deviceList, newDevice) + } + return deviceList +} + +func createVirtualDisk(key int32, controller types.BaseVirtualController, diskSize int32) *types.VirtualDisk { + dev := &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + Key: key, + Backing: &types.VirtualDiskFlatVer2BackingInfo{ + DiskMode: string(types.VirtualDiskModePersistent), + ThinProvisioned: types.NewBool(true), + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: "", + }, + }, + }, + CapacityInKB: int64(diskSize) * 1024 * 1024, + } + + if controller != nil { + dev.VirtualDevice.ControllerKey = controller.GetVirtualController().Key + } + return dev +} + +func createDataDiskDefinitions(numOfDataDisks int) []infrav1.VSphereDisk { + disks := []infrav1.VSphereDisk{} + + for i := 0; i < numOfDataDisks; i++ { + disk := infrav1.VSphereDisk{ + Name: fmt.Sprintf("disk_%d", i), + SizeGiB: 10 * int32(i+1), + } + disks = append(disks, disk) + } + return disks +} + +func validateDiskSpec(t *testing.T, device types.BaseVirtualDeviceConfigSpec, cloneDiskSize int32) { + t.Helper() + disk := device.GetVirtualDeviceConfigSpec().Device.(*types.VirtualDisk) diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 8e91e48e3b..3ccda45e9f 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -173,6 +173,7 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-ipam.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-kcp-remediation.yaml" - sourcePath: 
"../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-md-remediation.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-multi-disk.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-node-drain.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-ownerrefs-finalizers.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-pci.yaml" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/data-disks-patch.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/data-disks-patch.yaml new file mode 100644 index 0000000000..fcb61923e9 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/data-disks-patch.yaml @@ -0,0 +1,25 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: + dataDisks: + - name: "disk_1" + sizeGiB: 10 + - name: "disk_2" + sizeGiB: 20 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}-worker' + namespace: '${NAMESPACE}' +spec: + template: + spec: + dataDisks: + - name: "disk_1" + sizeGiB: 20 diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/kustomization.yaml new file mode 100644 index 0000000000..e9b47748cb --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../base +patchesStrategicMerge: + - data-disks-patch.yaml diff --git a/test/e2e/multi_disk_test.go b/test/e2e/multi_disk_test.go new file mode 100644 index 0000000000..848e70dba1 --- /dev/null +++ 
b/test/e2e/multi_disk_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/vmware/govmomi/vim25/types" + "k8s.io/utils/ptr" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" +) + +type diskSpecInput struct { + InfraClients + global GlobalInput + specName string + namespace string + clusterName string +} + +var _ = Describe("Ensure govmomi mode is able to add additional disks to VMs", func() { + const specName = "multi-disk" + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("multi-disk")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, + PostMachinesProvisioned: func(_ framework.ClusterProxy, namespace, clusterName string) { + dsi := diskSpecInput{ + specName: specName, + namespace: namespace, + clusterName: clusterName, + InfraClients: InfraClients{ + Client: vsphereClient, + 
RestClient: restClient, + Finder: vsphereFinder, + }, + global: GlobalInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, + E2EConfig: e2eConfig, + ArtifactFolder: artifactFolder, + }, + } + verifyDisks(ctx, dsi) + }, + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), + } + }) + }) +}) + +func verifyDisks(ctx context.Context, input diskSpecInput) { + Byf("Fetching the VSphereVM objects for the cluster %s", input.clusterName) + vms := getVSphereVMsForCluster(input.clusterName, input.namespace) + + By("Verifying the disks attached to the VMs") + for _, vm := range vms.Items { + // vSphere machine object should have the data disks configured. We will add +1 to the count since the os image + // needs to be included for comparison. + Byf("VM %s Spec has %d DataDisks defined", vm.Name, len(vm.Spec.DataDisks)) + diskCount := 1 + len(vm.Spec.DataDisks) + Expect(diskCount).ToNot(Equal(1), "Total disk count should be larger than 1 for this test") + + vmObj, err := input.Finder.VirtualMachine(ctx, vm.Name) + Expect(err).NotTo(HaveOccurred()) + + devices, err := vmObj.Device(ctx) + Expect(err).NotTo(HaveOccurred()) + + // We expect control plane VMs to have 3 disks, and the compute VMs will have 2. + disks := devices.SelectByType((*types.VirtualDisk)(nil)) + Expect(disks).To(HaveLen(diskCount), fmt.Sprintf("Disk count of VM should be %d", diskCount)) + } +}