Commit ed317ee

Add more test coverage
Signed-off-by: Rahul Ganesh <[email protected]>
Rahul Ganesh committed Sep 17, 2023
1 parent b2026b4 commit ed317ee
Showing 3 changed files with 98 additions and 29 deletions.
42 changes: 30 additions & 12 deletions pkg/executables/govc_test.go
@@ -1660,28 +1660,46 @@ func TestGovcGetResourcePoolInfo(t *testing.T) {
}{
{
testName: "pool_info_memory_limit_set",
response: `Name: Test-ResourcePool
Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
Mem Usage: 100MB (11.3%)
Mem Shares: normal
Mem Reservation: 0MB (expandable=true)
Mem Limit: 1000MB`,
response: `Name: Test-ResourcePool
Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
Mem Usage: 100MB (11.3%)
Mem Shares: normal
Mem Reservation: 0MB (expandable=true)
Mem Limit: 1000MB`,
govcErr: nil,
wantErr: nil,
wantMemInfo: map[string]int{executables.MemoryAvailable: 900},
},
{
testName: "pool_info_memory_limit_unset",
response: `Name: Test-ResourcePool
Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
Mem Usage: 100MB (11.3%)
Mem Shares: normal
Mem Reservation: 0MB (expandable=true)
Mem Limit: -1MB`,
response: `Name: Test-ResourcePool
Path: /SDDC-Datacenter/host/Cluster-1/Resources/Test-ResourcePool
Mem Usage: 100MB (11.3%)
Mem Shares: normal
Mem Reservation: 0MB (expandable=true)
Mem Limit: -1MB`,
govcErr: nil,
wantErr: nil,
wantMemInfo: map[string]int{executables.MemoryAvailable: -1},
},
{
testName: "pool_info_memory_usage_corrupt",
response: `Name: Test-ResourcePool
Mem Usage:corrupt-val
Mem Limit:-1MB`,
govcErr: nil,
wantErr: fmt.Errorf("unable to obtain memory usage for resource pool corrupt-val: strconv.Atoi: parsing \"-\": invalid syntax"),
wantMemInfo: nil,
},
{
testName: "pool_info_memory_limit_corrupt",
response: `Name: Test-ResourcePool
Mem Usage:100
Mem Limit:corrupt-val`,
govcErr: nil,
wantErr: fmt.Errorf("unable to obtain memory limit for resource pool corrupt-val: strconv.Atoi: parsing \"-\": invalid syntax"),
wantMemInfo: nil,
},
{
testName: "pool_info_error",
response: "",
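The two corrupt-value cases above exercise the parser's error paths. For orientation, here is a minimal, self-contained sketch of the parsing contract these fixtures imply. The names parsePoolMiB and poolMemoryInfo are hypothetical; the real logic lives in pkg/executables/govc.go and evidently tokenizes differently (the expected errors show strconv.Atoi receiving just "-"), so treat this as an illustration of the contract, not the implementation.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// MemoryAvailable mirrors the exported key the tests assert on.
const MemoryAvailable = "Memory_Available"

// parsePoolMiB extracts an integer MiB value from a field such as
// "100MB (11.3%)" or "-1MB". Hypothetical helper, for illustration only.
func parsePoolMiB(val string) (int, error) {
	val = strings.TrimSpace(val)
	if i := strings.Index(val, "MB"); i >= 0 {
		val = val[:i]
	}
	return strconv.Atoi(val)
}

// poolMemoryInfo reproduces the behavior the fixtures assert: available
// memory is limit minus usage, or -1 when no memory limit is set.
func poolMemoryInfo(response string) (map[string]int, error) {
	usage, limit := 0, 0
	var err error
	for _, line := range strings.Split(response, "\n") {
		key, val, found := strings.Cut(line, ":")
		if !found {
			continue
		}
		switch strings.TrimSpace(key) {
		case "Mem Usage":
			if usage, err = parsePoolMiB(val); err != nil {
				return nil, fmt.Errorf("unable to obtain memory usage for resource pool %s: %v", strings.TrimSpace(val), err)
			}
		case "Mem Limit":
			if limit, err = parsePoolMiB(val); err != nil {
				return nil, fmt.Errorf("unable to obtain memory limit for resource pool %s: %v", strings.TrimSpace(val), err)
			}
		}
	}
	available := -1
	if limit != -1 {
		available = limit - usage
	}
	return map[string]int{MemoryAvailable: available}, nil
}

func main() {
	info, err := poolMemoryInfo("Mem Usage: 100MB (11.3%)\nMem Limit: 1000MB")
	fmt.Println(info, err) // map[Memory_Available:900] <nil>
}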
12 changes: 7 additions & 5 deletions pkg/providers/vsphere/vsphere.go
@@ -52,7 +52,6 @@ const (
backOffPeriod = 5 * time.Second
disk1 = "Hard disk 1"
disk2 = "Hard disk 2"
cpuAvailable = "CPU_Available"
MemoryAvailable = "Memory_Available"
ethtoolDaemonSetName = "vsphere-disable-udp-offload"
)
@@ -635,13 +634,16 @@ func (p *vsphereProvider) calculateResourcePoolMemoryUsage(ctx context.Context,
if err != nil {
return err
}

availableMemoryMiB += prevUsage
// update the available memory with the previous usage only when a memory limit is set (-1 means the pool has no limit)
if availableMemoryMiB != -1 {
availableMemoryMiB += prevUsage
}
updateMemoryUsageMap(mc, needMemoryMiB, availableMemoryMiB, prevUsage, mu)
return nil
}

func updateMemoryUsageMap(mc *v1alpha1.VSphereMachineConfig, needMiB, availableMiB, prevUsage int, mu map[string]*memoryUsage) {
if _, ok := mu[mc.Spec.ResourcePool]; ok {
if _, ok := mu[mc.Spec.ResourcePool]; ok && mu[mc.Spec.ResourcePool].availableMemoryMiB != -1 {
mu[mc.Spec.ResourcePool].needMemoryMiB += needMiB
mu[mc.Spec.ResourcePool].availableMemoryMiB += prevUsage
} else {
@@ -658,14 +660,14 @@ func (p *vsphereProvider) validateMemoryUsageForCreate(ctx context.Context, clus
cpMachineConfig := clusterSpec.controlPlaneMachineConfig()
controlPlaneAvailableMiB, controlPlaneNeedMiB, err := p.getMachineConfigMemoryRequirements(ctx, datacenter, cpMachineConfig, clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Count)
if err != nil {
return err
return fmt.Errorf("calculating memory usage for control plane: %v", err)
}
updateMemoryUsageMap(cpMachineConfig, controlPlaneNeedMiB, controlPlaneAvailableMiB, 0, memoryUsage)
for _, workerNodeGroupConfiguration := range clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerMachineConfig := clusterSpec.workerMachineConfig(workerNodeGroupConfiguration)
workerAvailableMiB, workerNeedMiB, err := p.getMachineConfigMemoryRequirements(ctx, datacenter, workerMachineConfig, *workerNodeGroupConfiguration.Count)
if err != nil {
return err
return fmt.Errorf("calculating memory usage for worker node groups: %v", err)
}

updateMemoryUsageMap(workerMachineConfig, workerNeedMiB, workerAvailableMiB, 0, memoryUsage)
}
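The guards above change how per-pool accounting treats the -1 sentinel that govc reports for pools without a memory limit. Below is a self-contained sketch of the updated updateMemoryUsageMap semantics; the memoryUsage field names come from this diff, while updatePool and the surrounding provider plumbing are assumptions made to keep the example runnable.

package main

import "fmt"

// memoryUsage mirrors the provider's per-resource-pool bookkeeping;
// field names are taken from the diff, the rest is illustrative.
type memoryUsage struct {
	availableMemoryMiB int
	needMemoryMiB      int
}

// updatePool sketches updateMemoryUsageMap: a pool with a finite limit
// accumulates demand (plus any usage released by machines being replaced),
// while a pool reporting -1 is unlimited and is never accumulated into,
// so the capacity check can skip it.
func updatePool(mu map[string]*memoryUsage, pool string, needMiB, availableMiB, prevUsage int) {
	if entry, ok := mu[pool]; ok && entry.availableMemoryMiB != -1 {
		entry.needMemoryMiB += needMiB
		entry.availableMemoryMiB += prevUsage
	} else {
		mu[pool] = &memoryUsage{
			availableMemoryMiB: availableMiB,
			needMemoryMiB:      needMiB,
		}
	}
}

func main() {
	mu := map[string]*memoryUsage{}
	updatePool(mu, "limited-pool", 8192, 16384, 0) // control plane
	updatePool(mu, "limited-pool", 4096, 16384, 0) // worker group in the same pool
	fmt.Println(mu["limited-pool"].needMemoryMiB)  // 12288: demand aggregates per pool
	updatePool(mu, "unlimited-pool", 4096, -1, 0)
	updatePool(mu, "unlimited-pool", 4096, -1, 0)
	fmt.Println(mu["unlimited-pool"].availableMemoryMiB) // -1: stays unlimited
}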
73 changes: 61 additions & 12 deletions pkg/providers/vsphere/vsphere_test.go
@@ -1866,14 +1866,15 @@ func TestSetupAndValidateUpgradeClusterSameMachineConfigforCPandEtcd(t *testing.
mockCtrl := gomock.NewController(t)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
provider.providerKubectlClient = kubectl

cluster := &types.Cluster{}
kubectl.EXPECT().GetEksaCluster(ctx, cluster, clusterSpec.Cluster.GetName()).Return(clusterSpec.Cluster.DeepCopy(), nil).Times(3)
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Times(5)
// for _, mc := range clusterSpec.VSphereMachineConfigs {
// kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(mc, nil)
for _, mc := range clusterSpec.VSphereMachineConfigs {
kubectl.EXPECT().GetEksaVSphereMachineConfig(ctx, gomock.Any(), cluster.KubeconfigFile, clusterSpec.Cluster.GetNamespace()).Return(mc, nil).AnyTimes()
}
// resourcePoolResponse := map[string]int{
// "Memory_Available": -1,
// }

// provider.govc.EXPECT().GetResourcePoolInfo(tt.ctx, tt.clusterSpec.VSphereDatacenter.Spec.Datacenter, tt.clusterSpec.VSphereMachineConfigs[controlPlaneMachineConfigName].Spec.ResourcePool).Return(resourcePoolResponse, nil)
err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec, clusterSpec)
if err != nil {
t.Fatalf("unexpected failure %v", err)
@@ -3487,7 +3488,7 @@ func TestValidateMachineConfigsMemoryUsageCreateSuccess(t *testing.T) {
datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
machineConfigs[tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.ResourcePool = "test-resourcepool"
for _, config := range machineConfigs {
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: -1}, nil)
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: -1}, nil)
}
vSpec := NewSpec(tt.clusterSpec)
err := tt.provider.validateMemoryUsageForCreate(tt.ctx, vSpec)
@@ -3501,28 +3502,54 @@ func TestValidateMachineConfigsMemoryUsageCreateError(t *testing.T) {
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
for _, config := range machineConfigs {
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: 100000}, nil)
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: 100000}, nil)
}
vSpec := NewSpec(tt.clusterSpec)
err := tt.provider.validateMemoryUsageForCreate(tt.ctx, vSpec)
resourcePool := machineConfigs[tt.clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name].Spec.ResourcePool
thenErrorExpected(t, fmt.Sprintf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err)
}

func TestSetupAndValidateCreateClusterMemoryUsageError(t *testing.T) {
tt := newProviderTest(t)
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
tt.setExpectationsForMachineConfigsVCenterValidation()
datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
cpMachineConfig := tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
for _, mc := range tt.machineConfigs {
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, mc.Spec.Template).Return(mc.Spec.Template, nil).AnyTimes()
}
tt.govc.EXPECT().GetTags(tt.ctx, cpMachineConfig.Spec.Template).Return([]string{eksd119ReleaseTag, ubuntuOSTag}, nil)
tt.govc.EXPECT().ListTags(tt.ctx)
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, cpMachineConfig.Spec.Datastore).Return(1000.0, nil).AnyTimes()
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, cpMachineConfig.Spec.ResourcePool).Return(nil, fmt.Errorf("error"))
err := tt.provider.SetupAndValidateCreateCluster(tt.ctx, tt.clusterSpec)
thenErrorExpected(t, "validating vsphere machine configs resource pool memory usage: calculating memory usage for control plane: error", err)
}

func TestValidateMachineConfigsMemoryUsageUpgradeSuccess(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil)
vSpec := NewSpec(tt.clusterSpec)
vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
// change the worker node group to verify that no negative-count scenario occurs
wnMachineConfig := getMachineConfig(vSpec.Spec, "test-wn")
newMachineConfigName := "new-test-wn"
newWorkerMachineConfig := wnMachineConfig.DeepCopy()
newWorkerMachineConfig.Name = newMachineConfigName
vSpec.VSphereMachineConfigs[newMachineConfigName] = newWorkerMachineConfig
vSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = newMachineConfigName
machineConfigs := tt.clusterSpec.VSphereMachineConfigs
datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
for _, config := range machineConfigs {
tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes()
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: -1}, nil)
tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).Return(config, nil).AnyTimes()
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: -1}, nil).AnyTimes()
}
vSpec := NewSpec(tt.clusterSpec)
vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
err := tt.provider.validateMemoryUsageForUpgrade(tt.ctx, vSpec, cluster)
if err != nil {
t.Fatalf("unexpected failure %v", err)
Expand All @@ -3539,7 +3566,7 @@ func TestValidateMachineConfigsMemoryUsageUpgradeError(t *testing.T) {
datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
for _, config := range machineConfigs {
tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, config.Name, cluster.KubeconfigFile, config.Namespace).AnyTimes()
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{executables.MemoryAvailable: 10000}, nil)
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, config.Spec.ResourcePool).Return(map[string]int{MemoryAvailable: 10000}, nil)
}
vSpec := NewSpec(tt.clusterSpec)
vSpec.Cluster.Spec.ControlPlaneConfiguration.Count += 2
@@ -3548,6 +3575,28 @@
thenErrorExpected(t, fmt.Sprintf("not enough memory avaialable in resource pool %v for given memoryMiB and count for respective machine groups", resourcePool), err)
}

func TestSetupAndValidateUpgradeClusterMemoryUsageError(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
Name: "test",
}
tt.setExpectationForSetup()
tt.setExpectationForVCenterValidation()
tt.setExpectationsForDefaultDiskAndCloneModeGovcCalls()
tt.setExpectationsForMachineConfigsVCenterValidation()
tt.kubectl.EXPECT().GetEksaCluster(tt.ctx, cluster, tt.clusterSpec.Cluster.GetName()).Return(tt.clusterSpec.Cluster.DeepCopy(), nil).Times(2)
tt.kubectl.EXPECT().GetEksaVSphereMachineConfig(tt.ctx, gomock.Any(), cluster.KubeconfigFile, tt.clusterSpec.Cluster.GetNamespace()).AnyTimes()
cpMachineConfig := tt.machineConfigs[tt.clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name]
tt.govc.EXPECT().SearchTemplate(tt.ctx, tt.datacenterConfig.Spec.Datacenter, cpMachineConfig.Spec.Template).Return(cpMachineConfig.Spec.Template, nil).AnyTimes()
tt.govc.EXPECT().GetTags(tt.ctx, cpMachineConfig.Spec.Template).Return([]string{eksd119ReleaseTag, ubuntuOSTag}, nil)
tt.govc.EXPECT().ListTags(tt.ctx)
tt.govc.EXPECT().GetWorkloadAvailableSpace(tt.ctx, cpMachineConfig.Spec.Datastore).Return(1000.0, nil).AnyTimes()
datacenter := tt.clusterSpec.VSphereDatacenter.Spec.Datacenter
tt.govc.EXPECT().GetResourcePoolInfo(tt.ctx, datacenter, cpMachineConfig.Spec.ResourcePool).Return(nil, fmt.Errorf("error"))
err := tt.provider.SetupAndValidateUpgradeCluster(tt.ctx, cluster, tt.clusterSpec, tt.clusterSpec)
thenErrorExpected(t, "validating vsphere machine configs resource pool memory usage: calculating memory usage for control plane: error", err)
}

func TestValidateMachineConfigsNameUniquenessSuccess(t *testing.T) {
tt := newProviderTest(t)
cluster := &types.Cluster{
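The two new Setup/Validate tests assert the fully wrapped error string end to end. A minimal sketch of how that %v chain composes, assuming the outer "validating vsphere machine configs resource pool memory usage" wrap lives in the SetupAndValidate path not shown in this diff, and with the govc failure simulated:

package main

import (
	"errors"
	"fmt"
)

// stand-in for a failing govc GetResourcePoolInfo call
func getResourcePoolInfo() (map[string]int, error) {
	return nil, errors.New("error")
}

// mirrors the wrapping this commit adds for the control-plane path
func validateMemoryUsageForCreate() error {
	if _, err := getResourcePoolInfo(); err != nil {
		return fmt.Errorf("calculating memory usage for control plane: %v", err)
	}
	return nil
}

func main() {
	// The caller (assumed here to be SetupAndValidateCreateCluster) adds one
	// more layer, producing exactly the string thenErrorExpected checks.
	if err := validateMemoryUsageForCreate(); err != nil {
		err = fmt.Errorf("validating vsphere machine configs resource pool memory usage: %v", err)
		fmt.Println(err)
		// validating vsphere machine configs resource pool memory usage: calculating memory usage for control plane: error
	}
}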
