From ed317eea14400ad7c12bd6bc79f11da6a9c0e449 Mon Sep 17 00:00:00 2001
From: Rahul Ganesh
Date: Sun, 17 Sep 2023 14:47:43 -0700
Subject: [PATCH] Add more test coverage

Signed-off-by: Rahul Ganesh
---
 pkg/executables/govc_test.go          | 42 ++++++++++-----
 pkg/providers/vsphere/vsphere.go      | 12 +++--
 pkg/providers/vsphere/vsphere_test.go | 73 ++++++++++++++++++++++-----
 3 files changed, 98 insertions(+), 29 deletions(-)

diff --git a/pkg/executables/govc_test.go b/pkg/executables/govc_test.go
index 3661f6174b1d3..f3c03109167e1 100644
--- a/pkg/executables/govc_test.go
+++ b/pkg/executables/govc_test.go
@@ -1660,28 +1660,46 @@ func TestGovcGetResourcePoolInfo(t *testing.T) {
 	}{
 		{
 			testName: "pool_info_memory_limit_set",
-			response: `Name: Test-ResourcePool
-  Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
-  Mem Usage: 100MB (11.3%)
-  Mem Shares: normal
-  Mem Reservation: 0MB (expandable=true)
-  Mem Limit: 1000MB`,
+			response: `Name: Test-ResourcePool
+  Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
+  Mem Usage: 100MB (11.3%)
+  Mem Shares: normal
+  Mem Reservation: 0MB (expandable=true)
+  Mem Limit: 1000MB`,
 			govcErr:     nil,
 			wantErr:     nil,
 			wantMemInfo: map[string]int{executables.MemoryAvailable: 900},
 		},
 		{
 			testName: "pool_info_memory_limit_unset",
-			response: `Name: Test-ResourcePool
-  Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
-  Mem Usage: 100MB (11.3%)
-  Mem Shares: normal
-  Mem Reservation: 0MB (expandable=true)
-  Mem Limit: -1MB`,
+			response: `Name: Test-ResourcePool
+  Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
+  Mem Usage: 100MB (11.3%)
+  Mem Shares: normal
+  Mem Reservation: 0MB (expandable=true)
+  Mem Limit: -1MB`,
 			govcErr:     nil,
 			wantErr:     nil,
 			wantMemInfo: map[string]int{executables.MemoryAvailable: -1},
 		},
+		{
+			testName: "pool_info_memory_usage_corrupt",
+			response: `Name: Test-ResourcePool
+  Mem Usage:corrupt-val
+  Mem Limit:-1MB`,
+			govcErr:     nil,
+			wantErr:     fmt.Errorf("unable to obtain memory usage for resource pool corrupt-val: strconv.Atoi: parsing \"-\": invalid syntax"),
+			wantMemInfo: nil,
+		},
+		{
+			testName: "pool_info_memory_limit_corrupt",
+			response: `Name: Test-ResourcePool
+  Mem Usage:100
+  Mem Limit:corrupt-val`,
+			govcErr:     nil,
+			wantErr:     fmt.Errorf("unable to obtain memory limit for resource pool corrupt-val: strconv.Atoi: parsing \"-\": invalid syntax"),
+			wantMemInfo: nil,
+		},
 		{
 			testName: "pool_info_error",
 			response: "",
diff --git a/pkg/providers/vsphere/vsphere.go b/pkg/providers/vsphere/vsphere.go
index 58a967d463160..9e4066fdfd017 100644
--- a/pkg/providers/vsphere/vsphere.go
+++ b/pkg/providers/vsphere/vsphere.go
@@ -52,7 +52,6 @@ const (
 	backOffPeriod        = 5 * time.Second
 	disk1                = "Hard disk 1"
 	disk2                = "Hard disk 2"
-	cpuAvailable         = "CPU_Available"
 	MemoryAvailable      = "Memory_Available"
 	ethtoolDaemonSetName = "vsphere-disable-udp-offload"
 )
@@ -635,13 +634,16 @@ func (p *vsphereProvider) calculateResourcePoolMemoryUsage(ctx context.Context,
 	if err != nil {
 		return err
 	}
-	availableMemoryMiB += prevUsage
+	// update the available memory with previous usage only when memory limit is set
+	if availableMemoryMiB != -1 {
+		availableMemoryMiB += prevUsage
+	}
 	updateMemoryUsageMap(mc, needMemoryMiB, availableMemoryMiB, prevUsage, mu)
 	return nil
 }
 
 func updateMemoryUsageMap(mc *v1alpha1.VSphereMachineConfig, needMiB, availableMiB, prevUsage int, mu map[string]*memoryUsage) {
-	if _, ok := mu[mc.Spec.ResourcePool]; ok {
+	if _, ok := mu[mc.Spec.ResourcePool]; ok && mu[mc.Spec.ResourcePool].availableMemoryMiB != -1 {
 		mu[mc.Spec.ResourcePool].needMemoryMiB += needMiB
 		mu[mc.Spec.ResourcePool].availableMemoryMiB += prevUsage
 	} else {
@@ -658,14 +660,14 @@ func (p *vsphereProvider) validateMemoryUsageForCreate(ctx context.Context, clus
 	cpMachineConfig := clusterSpec.controlPlaneMachineConfig()
 	controlPlaneAvailableMiB, controlPlaneNeedMiB, err := p.getMachineConfigMemoryRequirements(ctx, datacenter, cpMachineConfig, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count)
 	if err != nil {
-		return err
+		return fmt.Errorf("calculating memory usage for control plane: %v", err)
 	}
 	updateMemoryUsageMap(cpMachineConfig, controlPlaneNeedMiB, controlPlaneAvailableMiB, 0, memoryUsage)
 	for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
 		workerMachineConfig := clusterSpec.workerMachineConfig(workerNodeGroupConfiguration)
 		workerAvailableMiB, workerNeedMiB, err := p.getMachineConfigMemoryRequirements(ctx, datacenter, workerMachineConfig, *workerNodeGroupConfiguration.Count)
 		if err != nil {
-			return err
+			return fmt.Errorf("calculating memory usage for worker node groups: %v", err)
 		}
 		updateMemoryUsageMap(workerMachineConfig, workerNeedMiB, workerAvailableMiB, 0, memoryUsage)
 	}
diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go
index 2d5137de62f8d..97d090f3c2e4b 100644
--- a/pkg/providers/vsphere/vsphere_test.go
+++ b/pkg/providers/vsphere/vsphere_test.go
@@ -1866,14 +1866,15 @@ func TestSetupAndValidateUpgradeClusterSameMachineConfigforCPandEtcd(t *testing.
 	mockCtrl := gomock.NewController(t)
 	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
 	provider.providerKubectlClient = kubectl
-
 	cluster := &types.Cluster{}
 	kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(3)
-	kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Times(5)
-	// for _, mc := range clusterSpec.VSphereMachineConfigs {
-	// 	kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(mc, nil)
+	for _, mc := range clusterSpec.VSphereMachineConfigs {
+		kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(mc, nil).AnyTimes()
+	}
+	// resourcePoolResponse := map[string]int{
+	// 	"Memory_Available": -1,
 	// }
-
+	// provider.govc.EXPECT().GetResourcePoolInfo(tt.ctx, tt.clusterSpec.VSphereDatacenter.Spec.Datacenter, tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.ResourcePool).Return(resourcePoolResponse, nil)
 	err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
 	if err != nil {
 		t.Fatalf("unexpected failure %v", err)
@@ -3487,7 +3488,7 @@ func TestValidateMachineConfigsMemoryUsageCreateSuccess(t *testing.T) {
 	datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
 	machineConfigs[tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.ResourcePool = "test-resourcepool"
 	for _, config := range machineConfigs {
-		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: -1}, nil)
+		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: -1}, nil)
 	}
 	vSpec := NewSpec(tt.clusterSpec)
 	err := tt.provider.validateMemoryUsageForCreate(tt.ctx, vSpec)
@@ -3501,7 +3502,7 @@ func TestValidateMachineConfigsMemoryUsageCreateError(t *testing.T) {
 	machineConfigs := tt.clusterSpec.VSphereMachineConfigs
 	datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
 	for _, config := range machineConfigs {
-		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: 100000}, nil)
+		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: 100000}, nil)
 	}
 	vSpec := NewSpec(tt.clusterSpec)
 	err := tt.provider.validateMemoryUsageForCreate(tt.ctx, vSpec)
@@ -3509,20 +3510,46 @@
 	thenErrorExpected(t, fmt.Sprintf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err)
 }
 
+func TestSetupAndValidateCreateClusterMemoryUsageError(t *testing.T) {
+	tt := newProviderTest(t)
+	tt.setExpectationForSetup()
+	tt.setExpectationForVCenterValidation()
+	tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
+	tt.setExpectationsForMachineConfigsVCenterValidation()
+	datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
+	cpMachineConfig := tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
+	for _, mc := range tt.machineConfigs {
+		tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return(mc.Spec.Template, nil).AnyTimes()
+	}
+	tt.govc.EXPECT().GetTags(tt.ctx, cpMachineConfig.Spec.Template).Return([]string{eksd119ReleaseTag, ubuntuOSTag}, nil)
+	tt.govc.EXPECT().ListTags(tt.ctx)
+	tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, cpMachineConfig.Spec.Datastore).Return(1000.0, nil).AnyTimes()
+	tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, cpMachineConfig.Spec.ResourcePool).Return(nil, fmt.Errorf("error"))
+	err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
+	thenErrorExpected(t, "validating vsphere machine configs resource pool memory usage: calculating memory usage for control plane: error", err)
+}
+
 func TestValidateMachineConfigsMemoryUsageUpgradeSuccess(t *testing.T) {
 	tt := newProviderTest(t)
 	cluster := &types.Cluster{
 		Name: "test",
 	}
 	tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil)
+	vSpec := NewSpec(tt.clusterSpec)
+	vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
+	// change the worker node group to test there is no negative count scenario
+	wnMachineConfig := getMachineConfig(vSpec.Spec, "test-wn")
+	newMachineConfigName := "new-test-wn"
+	newWorkerMachineConfig := wnMachineConfig.DeepCopy()
+	newWorkerMachineConfig.Name = newMachineConfigName
+	vSpec.VSphereMachineConfigs[newMachineConfigName] = newWorkerMachineConfig
+	vSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = newMachineConfigName
 	machineConfigs := tt.clusterSpec.VSphereMachineConfigs
 	datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
 	for _, config := range machineConfigs {
-		tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes()
-		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: -1}, nil)
+		tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return(config, nil).AnyTimes()
+		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: -1}, nil).AnyTimes()
 	}
-	vSpec := NewSpec(tt.clusterSpec)
-	vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
 	err := tt.provider.validateMemoryUsageForUpgrade(tt.ctx, vSpec, cluster)
 	if err != nil {
 		t.Fatalf("unexpected failure %v", err)
@@ -3539,7 +3566,7 @@ func TestValidateMachineConfigsMemoryUsageUpgradeError(t *testing.T) {
 	datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
 	for _, config := range machineConfigs {
 		tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes()
-		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: 10000}, nil)
+		tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: 10000}, nil)
 	}
 	vSpec := NewSpec(tt.clusterSpec)
 	vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
@@ -3548,6 +3575,28 @@
 	thenErrorExpected(t, fmt.Sprintf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err)
 }
 
+func TestSetupAndValidateUpgradeClusterMemoryUsageError(t *testing.T) {
+	tt := newProviderTest(t)
+	cluster := &types.Cluster{
+		Name: "test",
+	}
+	tt.setExpectationForSetup()
+	tt.setExpectationForVCenterValidation()
+	tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
+	tt.setExpectationsForMachineConfigsVCenterValidation()
+	tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil).Times(2)
+	tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, gomock.Any(), cluster.KubeconfigFile, tt.clusterSpec.Cluster.GetNamespace()).AnyTimes()
+	cpMachineConfig := tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
+	tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, cpMachineConfig.Spec.Template).Return(cpMachineConfig.Spec.Template, nil).AnyTimes()
+	tt.govc.EXPECT().GetTags(tt.ctx, cpMachineConfig.Spec.Template).Return([]string{eksd119ReleaseTag, ubuntuOSTag}, nil)
+	tt.govc.EXPECT().ListTags(tt.ctx)
+	tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, cpMachineConfig.Spec.Datastore).Return(1000.0, nil).AnyTimes()
+	datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
+	tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, cpMachineConfig.Spec.ResourcePool).Return(nil, fmt.Errorf("error"))
+	err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, cluster, tt.clusterSpec, tt.clusterSpec)
+	thenErrorExpected(t, "validating vsphere machine configs resource pool memory usage: calculating memory usage for control plane: error", err)
+}
+
 func TestValidateMachineConfigsNameUniquenessSuccess(t *testing.T) {
 	tt := newProviderTest(t)
 	cluster := &types.Cluster{
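
The validation above hinges on a -1 sentinel: GetResourcePoolInfo reports Memory_Available as -1 when a resource pool has no memory limit (see the pool_info_memory_limit_unset case), and previously reserved usage is only folded back into the available figure when a real limit exists. Below is a minimal, self-contained sketch of that accumulation and check, using hypothetical names (poolMemoryUsage, addRequirement, validatePools) rather than the provider's actual API.

package main

import "fmt"

// poolMemoryUsage mirrors the idea behind the provider's per-resource-pool
// bookkeeping: memory needed by new machines vs. memory the pool can offer.
// availableMiB == -1 means the pool has no memory limit.
type poolMemoryUsage struct {
	needMiB      int
	availableMiB int
}

// addRequirement is a hypothetical helper showing the accumulation rule from
// the patch: previously reserved usage is only added back when a real limit
// exists; otherwise the entry is (re)initialized with the reported availability.
func addRequirement(mu map[string]*poolMemoryUsage, pool string, needMiB, availableMiB, prevUsageMiB int) {
	if entry, ok := mu[pool]; ok && entry.availableMiB != -1 {
		entry.needMiB += needMiB
		entry.availableMiB += prevUsageMiB
		return
	}
	mu[pool] = &poolMemoryUsage{needMiB: needMiB, availableMiB: availableMiB}
}

// validatePools flags pools whose requested memory exceeds what is available;
// pools without a limit (-1) are never flagged.
func validatePools(mu map[string]*poolMemoryUsage) error {
	for pool, usage := range mu {
		if usage.availableMiB != -1 && usage.needMiB > usage.availableMiB {
			return fmt.Errorf("not enough memory available in resource pool %v", pool)
		}
	}
	return nil
}

func main() {
	mu := map[string]*poolMemoryUsage{}
	addRequirement(mu, "pool-a", 8192, 16384, 0) // pool with a memory limit
	addRequirement(mu, "pool-b", 8192, -1, 0)    // pool without a memory limit
	fmt.Println(validatePools(mu))               // <nil>
}

Under this rule a pool reporting -1 skips both the accumulation and the final comparison, which is what the *Success tests above exercise by returning Memory_Available: -1 from the mocked GetResourcePoolInfo.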