Skip to content

Commit

Permalink
added more tests for scale down
Browse files Browse the repository at this point in the history
  • Loading branch information
deepakm-ntnx committed Feb 5, 2024
1 parent 6f63fc7 commit de0fc32
Show file tree
Hide file tree
Showing 3 changed files with 96 additions and 18 deletions.
90 changes: 80 additions & 10 deletions test/e2e/cluster_topology_scale_up_down_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,17 @@ var _ = Describe("When scaling down cluster with topology ", Label("clusterclass
dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, cancelWatches, cluster, e2eConfig.GetIntervals, skipCleanup)
})

var scaleDownMemorySizeWorkflow = func(targetKubeVer, targetImageName, fromMachineMemorySizeGibStr, toMachineMemorySizeGibStr string) {
var scaleDownWorkflow = func(
targetKubeVer,
targetImageName,
fromMachineMemorySizeGibStr,
toMachineMemorySizeGibStr,
fromMachineSystemDiskSizeGibStr,
toMachineSystemDiskSizeGibStr string,
fromMachineVCPUSockets,
toMachineVCPUSockets,
fromMachineVCPUsPerSocket,
toMachineVCPUsPerSocket int64) {
By("Creating a workload cluster with topology")
clusterTopologyConfig := NewClusterTopologyConfig(
WithName(clusterName),
Expand Down Expand Up @@ -107,12 +117,17 @@ var _ = Describe("When scaling down cluster with topology ", Label("clusterclass

var toMachineMemorySizeGib int64
fmt.Sscan(toMachineMemorySizeGibStr, &toMachineMemorySizeGib)
By("Check if all the machines have scaled down memory size")
testHelper.verifyMemorySizeOnNutanixMachines(ctx, verifyMemorySizeOnNutanixMachinesParams{
clusterName: clusterName,
namespace: namespace,
toMachineMemorySizeGib: toMachineMemorySizeGib,
bootstrapClusterProxy: bootstrapClusterProxy,
var toMachineSystemDiskSizeGib int64
fmt.Sscan(toMachineSystemDiskSizeGibStr, &toMachineSystemDiskSizeGib)
By("Check if all the machines have scaled down resource config (memory size, VCPUSockets, vcpusPerSocket)")
testHelper.verifyResourceConfigOnNutanixMachines(ctx, verifyResourceConfigOnNutanixMachinesParams{
clusterName: clusterName,
namespace: namespace,
toMachineMemorySizeGib: toMachineMemorySizeGib,
toMachineSystemDiskSizeGib: toMachineSystemDiskSizeGib,
toMachineVCPUSockets: toMachineVCPUSockets,
toMachineVCPUsPerSocket: toMachineVCPUsPerSocket,
bootstrapClusterProxy: bootstrapClusterProxy,
})

By("PASSED!")
Expand All @@ -124,7 +139,7 @@ var _ = Describe("When scaling down cluster with topology ", Label("clusterclass

kube127 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_27")
kube127Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_27")
scaleDownMemorySizeWorkflow(kube127, kube127Image, "4Gi", "3Gi")
scaleDownWorkflow(kube127, kube127Image, "4Gi", "3Gi", "40Gi", "40Gi", 2, 2, 1, 1)
})

It("Scale down a cluster with CP and Worker node machine memory size from 4Gi to 3Gi with Kube128", func() {
Expand All @@ -133,7 +148,7 @@ var _ = Describe("When scaling down cluster with topology ", Label("clusterclass

kube128 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_28")
kube128Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_28")
scaleDownMemorySizeWorkflow(kube128, kube128Image, "4Gi", "3Gi")
scaleDownWorkflow(kube127, kube127Image, "4Gi", "3Gi", "40Gi", "40Gi", 2, 2, 1, 1)
})

It("Scale down a cluster with CP and Worker node machine memory size from 4Gi to 3Gi with Kube129", func() {
Expand All @@ -142,7 +157,62 @@ var _ = Describe("When scaling down cluster with topology ", Label("clusterclass

Kube129 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_29")
Kube129Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_29")
scaleDownMemorySizeWorkflow(Kube129, Kube129Image, "4Gi", "3Gi")
scaleDownWorkflow(kube127, kube127Image, "4Gi", "3Gi", "40Gi", "40Gi", 2, 2, 1, 1)
})

// Scenario: on a Kube 1.27 topology cluster, scale vCPU sockets down from
// 3 to 2 on both control-plane and worker machines; memory, system disk,
// and vcpus-per-socket stay unchanged.
It("Scale down a cluster with CP and Worker node VCPUSockets from 3 to 2 with Kube127", func() {
	clusterName = testHelper.generateTestClusterName(specName)
	Expect(clusterName).NotTo(BeNil())

	k8sVersion := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_27")
	machineImage := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_27")
	scaleDownWorkflow(k8sVersion, machineImage, "4Gi", "3Gi", "40Gi", "40Gi", 3, 2, 1, 1)
})

It("Scale down a cluster with CP and Worker node VCPUSockets from 3 to 2 with Kube128", func() {
	clusterName = testHelper.generateTestClusterName(specName)
	Expect(clusterName).NotTo(BeNil())

	kube128 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_28")
	kube128Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_28")
	// Fix: use the Kube 1.28 version/image declared above. The previous call
	// referenced kube127/kube127Image (not in scope in this closure) and left
	// kube128/kube128Image unused — both compile errors in Go.
	scaleDownWorkflow(kube128, kube128Image, "4Gi", "3Gi", "40Gi", "40Gi", 3, 2, 1, 1)
})

It("Scale down a cluster with CP and Worker node machine VCPUSockets from 3 to 2 with Kube129", func() {
	clusterName = testHelper.generateTestClusterName(specName)
	Expect(clusterName).NotTo(BeNil())

	Kube129 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_29")
	Kube129Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_29")
	// Fix: use the Kube 1.29 version/image declared above. The previous call
	// referenced kube127/kube127Image (not in scope here) and left
	// Kube129/Kube129Image unused — both compile errors in Go.
	scaleDownWorkflow(Kube129, Kube129Image, "4Gi", "3Gi", "40Gi", "40Gi", 3, 2, 1, 1)
})

// Scenario: on a Kube 1.27 topology cluster, scale vcpus-per-socket down
// from 2 to 1 on both control-plane and worker machines; memory, system
// disk, and socket count stay unchanged.
It("Scale down a cluster with CP and Worker node vcpu per socket from 2 to 1 with Kube127", func() {
	clusterName = testHelper.generateTestClusterName(specName)
	Expect(clusterName).NotTo(BeNil())

	k8sVersion := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_27")
	machineImage := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_27")
	scaleDownWorkflow(k8sVersion, machineImage, "4Gi", "3Gi", "40Gi", "40Gi", 3, 3, 2, 1)
})

It("Scale down a cluster with CP and Worker node vcpu per socket from 2 to 1 with Kube128", func() {
	clusterName = testHelper.generateTestClusterName(specName)
	Expect(clusterName).NotTo(BeNil())

	kube128 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_28")
	kube128Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_28")
	// Fix: use the Kube 1.28 version/image declared above. The previous call
	// referenced kube127/kube127Image (not in scope in this closure) and left
	// kube128/kube128Image unused — both compile errors in Go.
	scaleDownWorkflow(kube128, kube128Image, "4Gi", "3Gi", "40Gi", "40Gi", 3, 3, 2, 1)
})

It("Scale down a cluster with CP and Worker node machine vcpu per socket from 2 to 1 with Kube129", func() {
	clusterName = testHelper.generateTestClusterName(specName)
	Expect(clusterName).NotTo(BeNil())

	Kube129 := testHelper.getVariableFromE2eConfig("KUBERNETES_VERSION_v1_29")
	Kube129Image := testHelper.getVariableFromE2eConfig("NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME_v1_29")
	// Fix: use the Kube 1.29 version/image declared above. The previous call
	// referenced kube127/kube127Image (not in scope here) and left
	// Kube129/Kube129Image unused — both compile errors in Go.
	scaleDownWorkflow(Kube129, Kube129Image, "4Gi", "3Gi", "40Gi", "40Gi", 3, 3, 2, 1)
})

// TODO: add tests that scale down the machine system disk size (the
// fromMachineSystemDiskSizeGibStr/toMachineSystemDiskSizeGibStr workflow
// parameters are currently always passed as "40Gi"/"40Gi").
})
2 changes: 1 addition & 1 deletion test/e2e/clusterclass_changes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ import (
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
)

var _ = Describe("When testing ClusterClass changes", Label("clusterclass"), func() {
var _ = Describe("When mutating ClusterClass fields", Label("clusterclass", "capx-feature-test"), func() {
capi_e2e.ClusterClassChangesSpec(ctx, func() capi_e2e.ClusterClassChangesSpecInput {
return capi_e2e.ClusterClassChangesSpecInput{
E2EConfig: e2eConfig,
Expand Down
22 changes: 15 additions & 7 deletions test/e2e/test_helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ type testHelperInterface interface {
verifyFailureMessageOnClusterMachines(ctx context.Context, params verifyFailureMessageOnClusterMachinesParams)
verifyGPUNutanixMachines(ctx context.Context, params verifyGPUNutanixMachinesParams)
verifyProjectNutanixMachines(ctx context.Context, params verifyProjectNutanixMachinesParams)
verifyMemorySizeOnNutanixMachines(ctx context.Context, params verifyMemorySizeOnNutanixMachinesParams)
verifyResourceConfigOnNutanixMachines(ctx context.Context, params verifyResourceConfigOnNutanixMachinesParams)
}

type testHelper struct {
Expand Down Expand Up @@ -581,14 +581,17 @@ func (t testHelper) verifyCategoriesNutanixMachines(ctx context.Context, cluster
}
}

type verifyMemorySizeOnNutanixMachinesParams struct {
clusterName string
namespace *corev1.Namespace
toMachineMemorySizeGib int64
bootstrapClusterProxy framework.ClusterProxy
// verifyResourceConfigOnNutanixMachinesParams bundles the inputs for
// verifyResourceConfigOnNutanixMachines: which cluster's machines to
// inspect and the resource values (memory, system disk, vCPU sockets,
// vCPUs per socket) each backing VM is expected to report after a scale
// operation.
type verifyResourceConfigOnNutanixMachinesParams struct {
	// clusterName identifies the workload cluster whose machines are checked.
	clusterName                string
	// namespace is the namespace the cluster's machine objects live in.
	namespace                  *corev1.Namespace
	// toMachineMemorySizeGib is the expected VM memory size in GiB
	// (compared against the VM's reported MemorySizeMib * 1024).
	toMachineMemorySizeGib     int64
	// toMachineSystemDiskSizeGib is the expected system disk size in GiB.
	// NOTE(review): not yet asserted by the verifier — see its TODO.
	toMachineSystemDiskSizeGib int64
	// toMachineVCPUSockets is the expected number of vCPU sockets per VM.
	toMachineVCPUSockets       int64
	// toMachineVCPUsPerSocket is the expected vCPUs per socket per VM.
	toMachineVCPUsPerSocket    int64
	// bootstrapClusterProxy is the CAPI framework proxy used to reach the
	// bootstrap/management cluster.
	bootstrapClusterProxy      framework.ClusterProxy
}

func (t testHelper) verifyMemorySizeOnNutanixMachines(ctx context.Context, params verifyMemorySizeOnNutanixMachinesParams) {
func (t testHelper) verifyResourceConfigOnNutanixMachines(ctx context.Context, params verifyResourceConfigOnNutanixMachinesParams) {
Eventually(
func(g Gomega) {
nutanixMachines := t.getMachinesForCluster(ctx,
Expand All @@ -603,6 +606,11 @@ func (t testHelper) verifyMemorySizeOnNutanixMachines(ctx context.Context, param
g.Expect(err).ShouldNot(HaveOccurred())
vmMemorySizeInMib := *vm.Status.Resources.MemorySizeMib
g.Expect(vmMemorySizeInMib).To(Equal(params.toMachineMemorySizeGib*1024), "expected memory size of VMs to be equal to %d but was %d", params.toMachineMemorySizeGib*1024, vmMemorySizeInMib)
vmNumSockets := *vm.Status.Resources.NumSockets
g.Expect(vmNumSockets).To(Equal(params.toMachineVCPUSockets), "expected num sockets of VMs to be equal to %d but was %d", params.toMachineVCPUSockets, vmNumSockets)
vmNumVcpusPerSocket := *vm.Status.Resources.NumVcpusPerSocket
g.Expect(vmNumVcpusPerSocket).To(Equal(params.toMachineVCPUsPerSocket), "expected vcpu per socket of VMs to be equal to %d but was %d", params.toMachineVCPUsPerSocket, vmNumVcpusPerSocket)
// TODO check system disk size as well
}
},
defaultTimeout,
Expand Down

0 comments on commit de0fc32

Please sign in to comment.