diff --git a/Makefile b/Makefile index b9cd4775d7..a4cb08fc73 100644 --- a/Makefile +++ b/Makefile @@ -374,6 +374,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai cp "$(RELEASE_DIR)/main/cluster-template-ignition.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/base/cluster-template-ignition.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/base" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/hw-upgrade" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-hw-upgrade.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/multi-disk" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-multi-disk.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/storage-policy" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-storage-policy.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/conformance" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-conformance.yaml" # Since CAPI uses different flavor names for KCP and MD remediation using MHC diff --git a/apis/v1alpha3/vspheremachine_conversion.go b/apis/v1alpha3/vspheremachine_conversion.go index 79a0f1d548..2544d37029 100644 --- a/apis/v1alpha3/vspheremachine_conversion.go +++ b/apis/v1alpha3/vspheremachine_conversion.go @@ -46,6 +46,7 @@ func (src *VSphereMachine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Network.Devices[i].DHCP6Overrides = restored.Spec.Network.Devices[i].DHCP6Overrides dst.Spec.Network.Devices[i].SkipIPAllocation = restored.Spec.Network.Devices[i].SkipIPAllocation } + dst.Spec.DataDisks = restored.Spec.DataDisks return nil } diff --git a/apis/v1alpha3/vspheremachinetemplate_conversion.go b/apis/v1alpha3/vspheremachinetemplate_conversion.go index 45f090e2c1..ba13cecc12 
100644 --- a/apis/v1alpha3/vspheremachinetemplate_conversion.go +++ b/apis/v1alpha3/vspheremachinetemplate_conversion.go @@ -49,6 +49,7 @@ func (src *VSphereMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.Network.Devices[i].DHCP6Overrides = restored.Spec.Template.Spec.Network.Devices[i].DHCP6Overrides dst.Spec.Template.Spec.Network.Devices[i].SkipIPAllocation = restored.Spec.Template.Spec.Network.Devices[i].SkipIPAllocation } + dst.Spec.Template.Spec.DataDisks = restored.Spec.Template.Spec.DataDisks return nil } diff --git a/apis/v1alpha3/vspherevm_conversion.go b/apis/v1alpha3/vspherevm_conversion.go index 8bc24a37ae..95c144c9ec 100644 --- a/apis/v1alpha3/vspherevm_conversion.go +++ b/apis/v1alpha3/vspherevm_conversion.go @@ -46,6 +46,7 @@ func (src *VSphereVM) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Network.Devices[i].DHCP6Overrides = restored.Spec.Network.Devices[i].DHCP6Overrides dst.Spec.Network.Devices[i].SkipIPAllocation = restored.Spec.Network.Devices[i].SkipIPAllocation } + dst.Spec.DataDisks = restored.Spec.DataDisks return nil } diff --git a/apis/v1alpha3/zz_generated.conversion.go b/apis/v1alpha3/zz_generated.conversion.go index 966195ccbb..2d953c987c 100644 --- a/apis/v1alpha3/zz_generated.conversion.go +++ b/apis/v1alpha3/zz_generated.conversion.go @@ -1762,5 +1762,6 @@ func autoConvert_v1beta1_VirtualMachineCloneSpec_To_v1alpha3_VirtualMachineClone // WARNING: in.PciDevices requires manual conversion: does not exist in peer-type // WARNING: in.OS requires manual conversion: does not exist in peer-type // WARNING: in.HardwareVersion requires manual conversion: does not exist in peer-type + // WARNING: in.DataDisks requires manual conversion: does not exist in peer-type return nil } diff --git a/apis/v1alpha4/vspheremachine_conversion.go b/apis/v1alpha4/vspheremachine_conversion.go index 22a85aaa5a..ff5c085894 100644 --- a/apis/v1alpha4/vspheremachine_conversion.go +++ 
b/apis/v1alpha4/vspheremachine_conversion.go @@ -46,6 +46,7 @@ func (src *VSphereMachine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Network.Devices[i].DHCP6Overrides = restored.Spec.Network.Devices[i].DHCP6Overrides dst.Spec.Network.Devices[i].SkipIPAllocation = restored.Spec.Network.Devices[i].SkipIPAllocation } + dst.Spec.DataDisks = restored.Spec.DataDisks return nil } diff --git a/apis/v1alpha4/vspheremachinetemplate_conversion.go b/apis/v1alpha4/vspheremachinetemplate_conversion.go index c0719758e8..bb2254e329 100644 --- a/apis/v1alpha4/vspheremachinetemplate_conversion.go +++ b/apis/v1alpha4/vspheremachinetemplate_conversion.go @@ -49,6 +49,7 @@ func (src *VSphereMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.Network.Devices[i].DHCP6Overrides = restored.Spec.Template.Spec.Network.Devices[i].DHCP6Overrides dst.Spec.Template.Spec.Network.Devices[i].SkipIPAllocation = restored.Spec.Template.Spec.Network.Devices[i].SkipIPAllocation } + dst.Spec.Template.Spec.DataDisks = restored.Spec.Template.Spec.DataDisks return nil } diff --git a/apis/v1alpha4/vspherevm_conversion.go b/apis/v1alpha4/vspherevm_conversion.go index 1f48120cca..7fd9a4aebb 100644 --- a/apis/v1alpha4/vspherevm_conversion.go +++ b/apis/v1alpha4/vspherevm_conversion.go @@ -46,6 +46,7 @@ func (src *VSphereVM) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Network.Devices[i].DHCP6Overrides = restored.Spec.Network.Devices[i].DHCP6Overrides dst.Spec.Network.Devices[i].SkipIPAllocation = restored.Spec.Network.Devices[i].SkipIPAllocation } + dst.Spec.DataDisks = restored.Spec.DataDisks return nil } diff --git a/apis/v1alpha4/zz_generated.conversion.go b/apis/v1alpha4/zz_generated.conversion.go index 147c1a9894..197b5b4667 100644 --- a/apis/v1alpha4/zz_generated.conversion.go +++ b/apis/v1alpha4/zz_generated.conversion.go @@ -1916,5 +1916,6 @@ func autoConvert_v1beta1_VirtualMachineCloneSpec_To_v1alpha4_VirtualMachineClone // WARNING: in.PciDevices requires manual 
conversion: does not exist in peer-type // WARNING: in.OS requires manual conversion: does not exist in peer-type // WARNING: in.HardwareVersion requires manual conversion: does not exist in peer-type + // WARNING: in.DataDisks requires manual conversion: does not exist in peer-type return nil } diff --git a/apis/v1beta1/types.go b/apis/v1beta1/types.go index cc0f3a1f6c..ce9d909ee8 100644 --- a/apis/v1beta1/types.go +++ b/apis/v1beta1/types.go @@ -203,6 +203,23 @@ type VirtualMachineCloneSpec struct { // Check the compatibility with the ESXi version before setting the value. // +optional HardwareVersion string `json:"hardwareVersion,omitempty"` + // DataDisks are additional disks to add to the VM that are not part of the VM's OVA template. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=29 + DataDisks []VSphereDisk `json:"dataDisks,omitempty"` +} + +// VSphereDisk is an additional disk to add to the VM that is not part of the VM OVA template. +type VSphereDisk struct { + // Name is used to identify the disk definition. Name is required and needs to be unique so that it can be used to + // clearly identify purpose of the disk. + // +kubebuilder:validation:Required + Name string `json:"name,omitempty"` + // SizeGiB is the size of the disk in GiB. + // +kubebuilder:validation:Required + SizeGiB int32 `json:"sizeGiB"` } // VSphereMachineTemplateResource describes the data needed to create a VSphereMachine from a template. diff --git a/apis/v1beta1/zz_generated.deepcopy.go b/apis/v1beta1/zz_generated.deepcopy.go index 44d12a65fe..584c92f93c 100644 --- a/apis/v1beta1/zz_generated.deepcopy.go +++ b/apis/v1beta1/zz_generated.deepcopy.go @@ -820,6 +820,21 @@ func (in *VSphereDeploymentZoneStatus) DeepCopy() *VSphereDeploymentZoneStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VSphereDisk) DeepCopyInto(out *VSphereDisk) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereDisk. +func (in *VSphereDisk) DeepCopy() *VSphereDisk { + if in == nil { + return nil + } + out := new(VSphereDisk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSphereFailureDomain) DeepCopyInto(out *VSphereFailureDomain) { *out = *in @@ -1321,6 +1336,11 @@ func (in *VirtualMachineCloneSpec) DeepCopyInto(out *VirtualMachineCloneSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]VSphereDisk, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineCloneSpec. diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml index 6c35e7dd3f..ae8026b9d0 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml @@ -974,6 +974,31 @@ spec: CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map type: object + dataDisks: + description: DataDisks are additional disks to add to the VM that + are not part of the VM's OVA template. + items: + description: VSphereDisk is an additional disk to add to the VM + that is not part of the VM OVA template. + properties: + name: + description: |- + Name is used to identify the disk definition. Name is required and needs to be unique so that it can be used to + clearly identify purpose of the disk. + type: string + sizeGiB: + description: SizeGiB is the size of the disk in GiB. 
+ format: int32 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map datacenter: description: |- Datacenter is the name or inventory path of the datacenter in which the diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml index 1cf5a58c33..fb870f3760 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml @@ -844,6 +844,31 @@ spec: CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map type: object + dataDisks: + description: DataDisks are additional disks to add to the + VM that are not part of the VM's OVA template. + items: + description: VSphereDisk is an additional disk to add to + the VM that is not part of the VM OVA template. + properties: + name: + description: |- + Name is used to identify the disk definition. Name is required and needs to be unique so that it can be used to + clearly identify purpose of the disk. + type: string + sizeGiB: + description: SizeGiB is the size of the disk in GiB. 
+ format: int32 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map datacenter: description: |- Datacenter is the name or inventory path of the datacenter in which the diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml index 079466a2f4..817c5e8d76 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml @@ -1064,6 +1064,31 @@ spec: CustomVMXKeys is a dictionary of advanced VMX options that can be set on VM Defaults to empty map type: object + dataDisks: + description: DataDisks are additional disks to add to the VM that + are not part of the VM's OVA template. + items: + description: VSphereDisk is an additional disk to add to the VM + that is not part of the VM OVA template. + properties: + name: + description: |- + Name is used to identify the disk definition. Name is required and needs to be unique so that it can be used to + clearly identify purpose of the disk. + type: string + sizeGiB: + description: SizeGiB is the size of the disk in GiB. 
+ format: int32 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map datacenter: description: |- Datacenter is the name or inventory path of the datacenter in which the diff --git a/docs/proposal/20241003-data-disk-support.md b/docs/proposal/20241003-data-disk-support.md new file mode 100644 index 0000000000..628088fb22 --- /dev/null +++ b/docs/proposal/20241003-data-disk-support.md @@ -0,0 +1,219 @@ +# CAPV Additional Data Disks When Creating New Machines + +```text +--- +title: CAPV Additional Data Disks When Creating New Machines +authors: + - "@vr4manta" +reviewers: + - TBD +creation-date: 2024-10-03 +last-updated: 2024-10-03 +status: implementable +--- +``` + +## Table of Contents + +* [CAPV ControlPlane Failure Domain](#capv-controlplane-failure-domain) + * [Table of Contents](#table-of-contents) + * [Glossary](#glossary) + * [Summary](#summary) + * [Motivation](#motivation) + * [Goals](#goals) + * [Non-Goals/Future Work](#non-goalsfuture-work) + * [Proposal](#proposal) + * [User Stories](#user-stories) + * [Story 1](#story-1) + * [Story 2](#story-2) + * [Requirements](#requirements) + * [Functional Requirements](#functional-requirements) + * [Overall Design](#overall-design) + * [For User Story 1](#for-user-story-1) + * [For User Story 2](#for-user-story-2) + * [API Design](#api-design) + * [Implementation Details](#implementation-details) + * [Notes/Constraints](#notesconstraints) + * [Upgrade Strategy](#upgrade-strategy) + +## Glossary + + +## Summary + +As the use of Kubernetes clusters grows, admins are needing more and more improvements to the VMs themselves to make sure they run as smoothly as possible. The number of cores and memory continue to increase for each machine and this is causing the amount of workloads to increase on each virtual machine. This growth is now causing the base VM image to not provide enough storage for OS needs. 
In some cases, users just increase the size of the primary disk using the existing configuration options for machines; however, this does not allow for all desired configuration choices. Admins are now wanting the ability to add additional disks to these VMs for things such as etcd storage, image storage, container runtime and even swap. + +Before this feature, CAPV only allows for the configuration of additional disks that are found within the vSphere VM Template that is used for cloning. As clusters stretch failure domains and as clusters contain multiple different machine sets, attempting to just create custom vSphere VM templates will cause the admin to have to manage a large number of vSphere OVA templates. Instead, it would be ideal if admins can just configure a machine or machineset to add additional disks to a VM during the cloning process that are not part of the template. + +This feature enhancement aims to allow admins the ability to configure additional disks that are not present in the VM templates by enhancing the vsphere machine API and adding the capability to the cloning process. + +## Motivation + +Cluster administrators are asking for the ability to add additional data disks to be used by the OS without having to create custom OVA images to be used by the VM cloning process. + +### Goals + +* Add new configuration for machines that are used to define new data disks to add to a VM. +* Align new property naming to be similar or even match other providers if possible. +* Do not boil the ocean with the initial implementation of this feature. + +### Non-Goals/Future Work + +* Add ability to perform advanced configuration of additional disks, such as defining the controller type (IDE, SCSI, etc.) or the unit number on the controller. +* Any disk management features such as encryption.
+ +## Proposal + +### User Stories + +#### Story 1 + +As an admin, I’d like my control plane machines to have an extra disk added so I can dedicate that disk for etcd database through my own means of mount management. + +#### Story 2 + +As an admin, I’d like my compute machine set to have an extra disk added to each VM so that I can use it as a dedicated disk for container image storage. + +### Requirements + +#### Functional Requirements + +### Overall Design + +The CAPV vSphere machine clone spec (VirtualMachineCloneSpec) contains a new array field to specify all additional data disks (dataDisks). These disks will be created and added to the VM during the clone process. + +Once the machine has been created, any change to the DataDisks field will follow existing change behaviors for machine sets and single machine definitions. + +CAPV will not be responsible for any custom mounting of the data disks. CAPV will create the disk and attach it to the VM. Each OS may assign them differently, so admins will need to understand the OS image they are using for their template and then configure each node. + +#### For User Story 1 + +CAPV will create the new disks for control plane nodes during cluster creation. + +The new disks will be placed in the same location as the primary disk (datastore) and will use the same controller as the primary disk. + +Creating new controllers is out of scope for this enhancement. The new disks will use the same controller as the primary disk. The unit number for the disks on that controller will be in the order in which the disks are defined in the machine spec configuration. + +#### For User Story 2 + +CAPV will treat all compute machine creation the same as the control plane machines. + +### API Design + +The following new struct has been created to represent all configuration for a vSphere disk + +```go +// VSphereDisk describes additional disks for vSphere to be added to VM that are not part of the VM OVA template.
+type VSphereDisk struct { + // Name is used to identify the disk definition. Name is required and needs to be unique so that it can be used to + // clearly identify purpose of the disk. + // +kubebuilder:validation:Required + Name string `json:"name,omitempty"` + // SizeGiB is the size of the disk (in GiB). + // +kubebuilder:validation:Required + SizeGiB int32 `json:"sizeGiB"` +} +``` + +The above type has been added to the machine spec section + +```go +// VirtualMachineCloneSpec is information used to clone a virtual machine. +type VirtualMachineCloneSpec struct { + ... + // DataDisks holds information for additional disks to add to the VM that are not part of the VM's OVA template. + // +optional + DataDisks []VSphereDisk `json:"dataDisks,omitempty"` +} +``` + +### Implementation Details + +The cloning process will now be able to add data disks to a VM. Using the config provided in the VSphereCloneTemplate, the defined disks will be added to the virtual machine. Each disk is added to the controller used by the primary disk. Currently, there are no plans to allow the user to define a new controller (SCSI/IDE/etc) for these disks.
+ +An example of what the VSphereMachineTemplate looks like when data disks are desired: +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-machine-template + namespace: openshift-cluster-api +spec: + template: + spec: + template: CAPV_2_Disk + server: vcenter.example.com + diskGiB: 128 + dataDisks: + - name: images + sizeGiB: 50 + - name: swap + sizeGiB: 90 + cloneMode: linkedClone + datacenter: cidatacenter + datastore: /cidatacenter/datastore/vsanDatastore + folder: /cidatacenter/vm/multi-disk-k96l6 + resourcePool: /cidatacenter/host/cicluster + numCPUs: 4 + memoryMiB: 16384 + network: + devices: + - dhcp4: true + networkName: ci-vlan-1240 +``` + +An example of what the VSphereMachine looks like when data disks are desired. +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachine +metadata: + name: ngirard-multi-qxkhh-single-def + namespace: openshift-cluster-api +spec: + cloneMode: linkedClone + dataDisks: + - name: images + sizeGiB: 50 + - name: swap + sizeGiB: 90 + datacenter: cidatacenter + datastore: /cidatacenter/datastore/vsanDatastore + diskGiB: 128 + folder: /cidatacenter/vm/ngirard-multi-qxkhh + memoryMiB: 16384 + network: + devices: + - dhcp4: true + networkName: ci-vlan-1240 + numCPUs: 4 + resourcePool: /cidatacenter/host/cicluster + server: vcenter.ci.ibmc.devcluster.openshift.com + template: CAPV_2_Disk + +``` + +In the above examples, two data disks will be created during the clone process and placed after the OS disk. The example shows a 50 GiB with name `images` and a 90 GiB disk with name `swap` being configured and added. + +For each `dataDisks` definition, the clone procedure will attempt to generate a device VirtualDeviceConfigSpec that will be used to create the new disk device. Each disk will be attached in the order in which they are defined in the template. 
All disks defined in the vSphere OVA template will come first with the new disks being attached after. + +The clone procedure will assign each new disk to the same controller being used by the primary (OS) disk of the OVA template. The current behavior of the clone procedure will not be able to create any new controllers; however, in the future, the template may be enhanced to define new controllers and then assign them during the disk creation portion of the clone procedure. + +### Notes/Constraints + +## Upgrade Strategy + +Data disks are optional. Existing clusters will continue to work without changes. Once existing clusters are upgraded, the new feature will be available for existing machines to be reconfigured / recreated. + +### MachineSets + +Existing machineset, that wish to take advantage of the new data disk feature, will need to be reconfigured with data disks and will need to rotate in the new machines. The modification behavior of all existing custom resources will remain the same. If an administrator wishes to add data disks to a MachineSet, they need to recreate the existing referenced VSphereMachineTemplate or create a new VSphereMachineTemplate and have the MachineSet reference that new one. 
+ +### VSphereMachineTemplates + +If an existing template is modified, you will be greeted by an error message similar to the following: + +>vspheremachinetemplates.infrastructure.cluster.x-k8s.io "test-machine-template" was not valid: +spec.template.spec: Invalid value: v1beta1.VSphereMachineTemplate{TypeMeta:v1.TypeMeta{Kind:"VSphereMachineTemplate", APIVersion:"infrastructure.cluster.x-k8s.io/v1beta1"}, ObjectMeta:v1.ObjectMeta{Name:"test-machine-template", GenerateName:"", Namespace:"openshift-cluster-api", SelfLink:"", UID:"7467ba3f-758e-4873-afc3-4b8ece9da737", ResourceVersion:"92227", Generation:2, CreationTimestamp:time.Date(2024, time.October, 31, 14, 35, 9, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference{v1.OwnerReference{APIVersion:"cluster.x-k8s.io/v1beta1", Kind:"Cluster", Name:"ngirard-multi-qxkhh", UID:"7c3030d9-8caa-40e2-a62c-db16d0749fb5", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}}, Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"kubectl-create", Operation:"Update", APIVersion:"infrastructure.cluster.x-k8s.io/v1beta1", Time:time.Date(2024, time.October, 31, 14, 35, 9, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc000ace450), Subresource:""}, v1.ManagedFieldsEntry{Manager:"cluster-api-controller-manager", Operation:"Update", APIVersion:"infrastructure.cluster.x-k8s.io/v1beta1", Time:time.Date(2024, time.October, 31, 14, 35, 58, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc000ace4e0), Subresource:""}, v1.ManagedFieldsEntry{Manager:"kubectl-edit", Operation:"Update", APIVersion:"infrastructure.cluster.x-k8s.io/v1beta1", Time:time.Date(2024, time.November, 4, 13, 31, 33, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc000ace510), Subresource:""}}}, 
Spec:v1beta1.VSphereMachineTemplateSpec{Template:v1beta1.VSphereMachineTemplateResource{ObjectMeta:v1beta1.ObjectMeta{Labels:map[string]string(nil), Annotations:map[string]string(nil)}, Spec:v1beta1.VSphereMachineSpec{VirtualMachineCloneSpec:v1beta1.VirtualMachineCloneSpec{Template:"CAPV_2_Disk", CloneMode:"linkedClone", Snapshot:"", Server:"vcenter.ci.ibmc.devcluster.openshift.com", Thumbprint:"", Datacenter:"cidatacenter", Folder:"/cidatacenter/vm/ngirard-multi-qxkhh", Datastore:"/cidatacenter/datastore/vsanDatastore", StoragePolicyName:"", ResourcePool:"/cidatacenter/host/cicluster", Network:v1beta1.NetworkSpec{Devices:[]v1beta1.NetworkDeviceSpec{v1beta1.NetworkDeviceSpec{NetworkName:"ci-vlan-1240", DeviceName:"", DHCP4:true, DHCP6:false, Gateway4:"", Gateway6:"", IPAddrs:[]string(nil), MTU:(*int64)(nil), MACAddr:"", Nameservers:[]string(nil), Routes:[]v1beta1.NetworkRouteSpec(nil), SearchDomains:[]string(nil), AddressesFromPools:[]v1.TypedLocalObjectReference(nil), DHCP4Overrides:(*v1beta1.DHCPOverrides)(nil), DHCP6Overrides:(*v1beta1.DHCPOverrides)(nil), SkipIPAllocation:false}}, Routes:[]v1beta1.NetworkRouteSpec(nil), PreferredAPIServerCIDR:""}, NumCPUs:4, NumCoresPerSocket:0, MemoryMiB:16384, DiskGiB:128, AdditionalDisksGiB:[]int32(nil), CustomVMXKeys:map[string]string(nil), TagIDs:[]string(nil), PciDevices:[]v1beta1.PCIDeviceSpec(nil), OS:"", HardwareVersion:"", DataDisks:[]v1beta1.VSphereDisk{v1beta1.VSphereDisk{Name:"", SizeGiB:69}, v1beta1.VSphereDisk{Name:"", SizeGiB:27}}}, ProviderID:(*string)(nil), FailureDomain:(*string)(nil), PowerOffMode:"hard", GuestSoftPowerOffTimeout:(*v1.Duration)(nil)}}}}: VSphereMachineTemplate spec.template.spec field is immutable. Please create a new resource instead. + +This is why existing templates will need to be recreated if attempting to add data disks to the definition. 
\ No newline at end of file diff --git a/pkg/services/govmomi/vcenter/clone.go b/pkg/services/govmomi/vcenter/clone.go index 91f64eb28d..7012cdc573 100644 --- a/pkg/services/govmomi/vcenter/clone.go +++ b/pkg/services/govmomi/vcenter/clone.go @@ -42,6 +42,11 @@ import ( const ( fullCloneDiskMoveType = types.VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate linkCloneDiskMoveType = types.VirtualMachineRelocateDiskMoveOptionsCreateNewChildDiskBacking + + // maxUnitNumber constant is used to define the maximum number of devices that can be assigned to a virtual machine's controller. + // Not all controllers support up to 30, but the maximum is 30. + // xref: https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-vm-administration/GUID-5872D173-A076-42FE-8D0B-9DB0EB0E7362.html#:~:text=If%20you%20add%20a%20hard,values%20from%200%20to%2014. + maxUnitNumber = 30 ) // Clone kicks off a clone operation on vCenter to create a new virtual machine. This function does not wait for @@ -145,6 +150,16 @@ func Clone(ctx context.Context, vmCtx *capvcontext.VMContext, bootstrapData []by deviceSpecs = append(deviceSpecs, diskSpecs...) } + // Process all DataDisks definitions to dynamically create and add disks to the VM + if len(vmCtx.VSphereVM.Spec.DataDisks) > 0 { + dataDisks, err := createDataDisks(ctx, vmCtx.VSphereVM.Spec.DataDisks, devices) + if err != nil { + return errors.Wrapf(err, "error getting data disks") + } + log.V(4).Info("Adding the following data disks", "disks", dataDisks) + deviceSpecs = append(deviceSpecs, dataDisks...) + } + networkSpecs, err := getNetworkSpecs(ctx, vmCtx, devices) if err != nil { return errors.Wrapf(err, "error getting network specs for %q", ctx) @@ -390,6 +405,117 @@ func getDiskConfigSpec(disk *types.VirtualDisk, diskCloneCapacityKB int64) (type }, nil } +// createDataDisks parses through the list of VSphereDisk objects and generates the VirtualDeviceConfigSpec for each one. 
+func createDataDisks(ctx context.Context, dataDiskDefs []infrav1.VSphereDisk, devices object.VirtualDeviceList) ([]types.BaseVirtualDeviceConfigSpec, error) { + log := ctrl.LoggerFrom(ctx) + additionalDisks := []types.BaseVirtualDeviceConfigSpec{} + + disks := devices.SelectByType((*types.VirtualDisk)(nil)) + if len(disks) == 0 { + return nil, errors.Errorf("Invalid disk count: %d", len(disks)) + } + + // There is at least one disk + primaryDisk := disks[0].(*types.VirtualDisk) + + // Get the controller of the primary disk. + controller, ok := devices.FindByKey(primaryDisk.ControllerKey).(types.BaseVirtualController) + if !ok { + return nil, errors.Errorf("unable to find controller with key=%v", primaryDisk.ControllerKey) + } + + controllerKey := controller.GetVirtualController().Key + unitNumberAssigner, err := newUnitNumberAssigner(controller, devices) + if err != nil { + return nil, err + } + + for i, dataDisk := range dataDiskDefs { + log.V(2).Info("Adding disk", "name", dataDisk.Name, "spec", dataDisk) + + dev := &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + // Key needs to be unique and cannot match another new disk being added. So we'll use the index as an + // input to NewKey. NewKey() will always return same value since our new devices are not part of devices yet. + Key: devices.NewKey() - int32(i), + Backing: &types.VirtualDiskFlatVer2BackingInfo{ + DiskMode: string(types.VirtualDiskModePersistent), + ThinProvisioned: types.NewBool(true), + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: "", + }, + }, + ControllerKey: controller.GetVirtualController().Key, + }, + CapacityInKB: int64(dataDisk.SizeGiB) * 1024 * 1024, + } + + vd := dev.GetVirtualDevice() + vd.ControllerKey = controllerKey + + // Assign unit number to the new disk. Should be next available slot on the controller. 
+ unitNumber, err := unitNumberAssigner.assign() + if err != nil { + return nil, err + } + vd.UnitNumber = &unitNumber + + log.V(4).Info("Created device for data disk device", "name", dataDisk.Name, "spec", dataDisk, "device", dev) + + additionalDisks = append(additionalDisks, &types.VirtualDeviceConfigSpec{ + Device: dev, + Operation: types.VirtualDeviceConfigSpecOperationAdd, + FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate, + }) + } + + return additionalDisks, nil +} + +type unitNumberAssigner struct { + used []bool + offset int32 +} + +func newUnitNumberAssigner(controller types.BaseVirtualController, existingDevices object.VirtualDeviceList) (*unitNumberAssigner, error) { + if controller == nil { + return nil, errors.New("controller parameter cannot be nil") + } + used := make([]bool, maxUnitNumber) + + // SCSIControllers also use a unit. + if scsiController, ok := controller.(types.BaseVirtualSCSIController); ok { + used[scsiController.GetVirtualSCSIController().ScsiCtlrUnitNumber] = true + } + controllerKey := controller.GetVirtualController().Key + + // Mark all unit numbers of existing devices as used + for _, device := range existingDevices { + d := device.GetVirtualDevice() + if d.ControllerKey == controllerKey && d.UnitNumber != nil { + used[*d.UnitNumber] = true + } + } + + // Set offset to 0, it will auto-increment on the first assignment. 
+ return &unitNumberAssigner{used: used, offset: 0}, nil +} + +func (a *unitNumberAssigner) assign() (int32, error) { + if int(a.offset) > len(a.used) { + return -1, fmt.Errorf("all unit numbers are already in-use") + } + for i, isInUse := range a.used[a.offset:] { + unit := int32(i) + a.offset + if !isInUse { + a.used[unit] = true + a.offset++ + return unit, nil + } + } + return -1, fmt.Errorf("all unit numbers are already in-use") +} + const ethCardType = "vmxnet3" func getNetworkSpecs(ctx context.Context, vmCtx *capvcontext.VMContext, devices object.VirtualDeviceList) ([]types.BaseVirtualDeviceConfigSpec, error) { diff --git a/pkg/services/govmomi/vcenter/clone_test.go b/pkg/services/govmomi/vcenter/clone_test.go index 292dc00b6a..f8182d7564 100644 --- a/pkg/services/govmomi/vcenter/clone_test.go +++ b/pkg/services/govmomi/vcenter/clone_test.go @@ -19,6 +19,7 @@ package vcenter import ( ctx "context" "crypto/tls" + "fmt" "testing" "github.com/vmware/govmomi/object" @@ -60,25 +61,30 @@ func TestGetDiskSpec(t *testing.T) { additionalCloneDiskSizes []int32 name string disks object.VirtualDeviceList + dataDisks []infrav1.VSphereDisk + expectedDiskCount int err string }{ { - name: "Successfully clone template with correct disk requirements", - disks: defaultDisks, - cloneDiskSize: defaultSizeGiB, - expectDevice: true, + name: "Successfully clone template with correct disk requirements", + disks: defaultDisks, + cloneDiskSize: defaultSizeGiB, + expectDevice: true, + expectedDiskCount: 1, }, { - name: "Successfully clone template and increase disk requirements", - disks: defaultDisks, - cloneDiskSize: defaultSizeGiB + 1, - expectDevice: true, + name: "Successfully clone template and increase disk requirements", + disks: defaultDisks, + cloneDiskSize: defaultSizeGiB + 1, + expectDevice: true, + expectedDiskCount: 1, }, { - name: "Successfully clone template with no explicit disk requirements", - disks: defaultDisks, - cloneDiskSize: 0, - expectDevice: true, + name: 
"Successfully clone template with no explicit disk requirements", + disks: defaultDisks, + cloneDiskSize: 0, + expectDevice: true, + expectedDiskCount: 1, }, { name: "Fail to clone template with lower disk requirements then on template", @@ -98,6 +104,7 @@ func TestGetDiskSpec(t *testing.T) { cloneDiskSize: defaultSizeGiB + 1, additionalCloneDiskSizes: []int32{defaultSizeGiB + 1}, expectDevice: true, + expectedDiskCount: 2, }, { name: "Fails to clone template and decrease second disk size", @@ -121,25 +128,187 @@ func TestGetDiskSpec(t *testing.T) { }, } vmContext := &capvcontext.VMContext{VSphereVM: vsphereVM} - devices, err := getDiskSpec(vmContext, tc.disks) + deviceResults, err := getDiskSpec(vmContext, tc.disks) if (tc.err != "" && err == nil) || (tc.err == "" && err != nil) || (err != nil && tc.err != err.Error()) { t.Fatalf("Expected to get '%v' error from getDiskSpec, got: '%v'", tc.err, err) } - if deviceFound := len(devices) != 0; tc.expectDevice != deviceFound { - t.Fatalf("Expected to get a device: %v, but got: '%#v'", tc.expectDevice, devices) + if deviceFound := len(deviceResults) != 0; tc.expectDevice != deviceFound { + t.Fatalf("Expected to get a device: %v, but got: '%#v'", tc.expectDevice, deviceResults) } if tc.expectDevice { - primaryDevice := devices[0] + primaryDevice := deviceResults[0] validateDiskSpec(t, primaryDevice, tc.cloneDiskSize) if len(tc.additionalCloneDiskSizes) != 0 { - secondaryDevice := devices[1] + secondaryDevice := deviceResults[1] validateDiskSpec(t, secondaryDevice, tc.additionalCloneDiskSizes[0]) } + + // Check number of disks present + if len(deviceResults) != tc.expectedDiskCount { + t.Fatalf("Expected device count to be %v, but found %v", tc.expectedDiskCount, len(deviceResults)) + } } }) } } +func TestCreateDataDisks(t *testing.T) { + model, session, server := initSimulator(t) + t.Cleanup(model.Remove) + t.Cleanup(server.Close) + vm := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine) + machine := 
object.NewVirtualMachine(session.Client.Client, vm.Reference()) + + deviceList, err := machine.Device(ctx.TODO()) + if err != nil { + t.Fatalf("Failed to obtain vm devices: %v", err) + } + + // Find primary disk and get controller + disks := deviceList.SelectByType((*types.VirtualDisk)(nil)) + primaryDisk := disks[0].(*types.VirtualDisk) + controller, ok := deviceList.FindByKey(primaryDisk.ControllerKey).(types.BaseVirtualController) + if !ok { + t.Fatalf("unable to get controller for test") + } + + testCases := []struct { + name string + devices object.VirtualDeviceList + controller types.BaseVirtualController + dataDisks []infrav1.VSphereDisk + expectedUnitNumber []int + err string + }{ + { + name: "Add data disk with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(1), + expectedUnitNumber: []int{1}, + }, + { + name: "Add data disk with 2 ova disk", + devices: createAdditionalDisks(deviceList, controller, 1), + controller: controller, + dataDisks: createDataDiskDefinitions(1), + expectedUnitNumber: []int{2}, + }, + { + name: "Add multiple data disk with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(2), + expectedUnitNumber: []int{1, 2}, + }, + { + name: "Add too many data disks with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(30), + err: "all unit numbers are already in-use", + }, + { + name: "Add data disk with no ova disk", + devices: nil, + controller: nil, + dataDisks: createDataDiskDefinitions(1), + err: "Invalid disk count: 0", + }, + { + name: "Add too many data disks with 1 ova disk", + devices: deviceList, + controller: controller, + dataDisks: createDataDiskDefinitions(40), + err: "all unit numbers are already in-use", + }, + } + + for _, test := range testCases { + tc := test + t.Run(tc.name, func(t *testing.T) { + var funcError error + + // Create the data disks + newDisks, funcError := 
createDataDisks(ctx.TODO(), tc.dataDisks, tc.devices) + if (tc.err != "" && funcError == nil) || (tc.err == "" && funcError != nil) || (funcError != nil && tc.err != funcError.Error()) { + t.Fatalf("Expected to get '%v' error from assignUnitNumber, got: '%v'", tc.err, funcError) + } + + if tc.err == "" && funcError == nil { + // Check number of disks present + if len(newDisks) != len(tc.dataDisks) { + t.Fatalf("Expected device count to be %v, but found %v", len(tc.dataDisks), len(newDisks)) + } + + // Validate the configs of new data disks + for index, disk := range newDisks { + // Check disk size matches original request + vd := disk.GetVirtualDeviceConfigSpec().Device.(*types.VirtualDisk) + expectedSize := int64(tc.dataDisks[index].SizeGiB * 1024 * 1024) + if vd.CapacityInKB != expectedSize { + t.Fatalf("Expected disk size (KB) %d to match %d", vd.CapacityInKB, expectedSize) + } + + // Check unit number + unitNumber := *disk.GetVirtualDeviceConfigSpec().Device.GetVirtualDevice().UnitNumber + if tc.err == "" && unitNumber != int32(tc.expectedUnitNumber[index]) { + t.Fatalf("Expected to get unitNumber '%d' error from assignUnitNumber, got: '%d'", tc.expectedUnitNumber[index], unitNumber) + } + } + } + }) + } +} + +func createAdditionalDisks(devices object.VirtualDeviceList, controller types.BaseVirtualController, numOfDisks int) object.VirtualDeviceList { + deviceList := devices + disks := devices.SelectByType((*types.VirtualDisk)(nil)) + primaryDisk := disks[0].(*types.VirtualDisk) + + for i := 0; i < numOfDisks; i++ { + newDevice := createVirtualDisk(primaryDisk.ControllerKey+1, controller, 10) + newUnitNumber := *primaryDisk.UnitNumber + int32(i+1) + newDevice.UnitNumber = &newUnitNumber + deviceList = append(deviceList, newDevice) + } + return deviceList +} + +func createVirtualDisk(key int32, controller types.BaseVirtualController, diskSize int32) *types.VirtualDisk { + dev := &types.VirtualDisk{ + VirtualDevice: types.VirtualDevice{ + Key: key, + Backing: 
&types.VirtualDiskFlatVer2BackingInfo{ + DiskMode: string(types.VirtualDiskModePersistent), + ThinProvisioned: types.NewBool(true), + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: "", + }, + }, + }, + CapacityInKB: int64(diskSize) * 1024 * 1024, + } + + if controller != nil { + dev.VirtualDevice.ControllerKey = controller.GetVirtualController().Key + } + return dev +} + +func createDataDiskDefinitions(numOfDataDisks int) []infrav1.VSphereDisk { + disks := []infrav1.VSphereDisk{} + + for i := 0; i < numOfDataDisks; i++ { + disk := infrav1.VSphereDisk{ + Name: fmt.Sprintf("disk_%d", i), + SizeGiB: 10 * int32(i), + } + disks = append(disks, disk) + } + return disks +} + func validateDiskSpec(t *testing.T, device types.BaseVirtualDeviceConfigSpec, cloneDiskSize int32) { t.Helper() disk := device.GetVirtualDeviceConfigSpec().Device.(*types.VirtualDisk) diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 8e91e48e3b..3ccda45e9f 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -173,6 +173,7 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-ipam.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-kcp-remediation.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-md-remediation.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-multi-disk.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-node-drain.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-ownerrefs-finalizers.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/main/cluster-template-pci.yaml" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/data-disks-patch.yaml 
b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/data-disks-patch.yaml new file mode 100644 index 0000000000..fcb61923e9 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/data-disks-patch.yaml @@ -0,0 +1,25 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: + dataDisks: + - name: "disk_1" + sizeGiB: 10 + - name: "disk_2" + sizeGiB: 20 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: '${CLUSTER_NAME}-worker' + namespace: '${NAMESPACE}' +spec: + template: + spec: + dataDisks: + - name: "disk_1" + sizeGiB: 20 diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/kustomization.yaml new file mode 100644 index 0000000000..e9b47748cb --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/multi-disk/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../base +patchesStrategicMerge: + - data-disks-patch.yaml diff --git a/test/e2e/multi_disk_test.go b/test/e2e/multi_disk_test.go new file mode 100644 index 0000000000..848e70dba1 --- /dev/null +++ b/test/e2e/multi_disk_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package e2e + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/vmware/govmomi/vim25/types" + "k8s.io/utils/ptr" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" +) + +type diskSpecInput struct { + InfraClients + global GlobalInput + specName string + namespace string + clusterName string +} + +var _ = Describe("Ensure govmomi mode is able to add additional disks to VMs", func() { + const specName = "multi-disk" + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("multi-disk")), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, + PostMachinesProvisioned: func(_ framework.ClusterProxy, namespace, clusterName string) { + dsi := diskSpecInput{ + specName: specName, + namespace: namespace, + clusterName: clusterName, + InfraClients: InfraClients{ + Client: vsphereClient, + RestClient: restClient, + Finder: vsphereFinder, + }, + global: GlobalInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, + E2EConfig: e2eConfig, + ArtifactFolder: artifactFolder, + }, + } + verifyDisks(ctx, dsi) + }, + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](1), + } + }) + }) +}) + +func verifyDisks(ctx context.Context, input diskSpecInput) { + Byf("Fetching the VSphereVM objects for the cluster %s", input.clusterName) + vms := getVSphereVMsForCluster(input.clusterName, input.namespace) + + 
By("Verifying the disks attached to the VMs") + for _, vm := range vms.Items { + // vSphere machine object should have the data disks configured. We will add +1 to the count since the os image + // needs to be included for comparison. + Byf("VM %s Spec has %d DataDisks defined", vm.Name, len(vm.Spec.DataDisks)) + diskCount := 1 + len(vm.Spec.DataDisks) + Expect(diskCount).ToNot(Equal(1), "Total disk count should be larger than 1 for this test") + + vmObj, err := input.Finder.VirtualMachine(ctx, vm.Name) + Expect(err).NotTo(HaveOccurred()) + + devices, err := vmObj.Device(ctx) + Expect(err).NotTo(HaveOccurred()) + + // We expect control plane VMs to have 3 disks, and the compute VMs will have 2. + disks := devices.SelectByType((*types.VirtualDisk)(nil)) + Expect(disks).To(HaveLen(diskCount), fmt.Sprintf("Disk count of VM should be %d", diskCount)) + } +}