Commit

address review comments - part 2

rishabh-11 committed Nov 26, 2024
1 parent d50a034 commit b5fca63
Showing 9 changed files with 82 additions and 83 deletions.
41 changes: 20 additions & 21 deletions controllers/cluster/cluster_controller_test.go
@@ -12,6 +12,7 @@ import (
     "log"
     "os"
     "path/filepath"
+    "reflect"
     "testing"
     "time"
 
@@ -137,7 +138,7 @@ func testProberDedicatedEnvTest(t *testing.T) {
 func testReconciliationAfterAPIServerIsDown(ctx context.Context, t *testing.T, testEnv *envtest.Environment, _ client.Client, reconciler *Reconciler, _ manager.Manager, cancelFn context.CancelFunc) {
     var err error
     g := NewWithT(t)
-    cluster, _, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, _, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     cancelFn()
     err = testEnv.ControlPlane.APIServer.Stop()
@@ -181,21 +182,21 @@ func testProberSharedEnvTest(t *testing.T) {
 }
 
 func testShootWorkerNodeConditions(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    workerNodeConditions := map[string][]string{testutil.Worker1Name: {testutil.NodeConditionDiskPressure, testutil.NodeConditionMemoryPressure}}
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).WithWorkerNodeConditions(workerNodeConditions).Build()
+    workerNodeConditions := [][]string{{testutil.NodeConditionDiskPressure, testutil.NodeConditionMemoryPressure}}
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).WithWorkerNodeConditions(workerNodeConditions).Build()
     g.Expect(err).ToNot(HaveOccurred())
     createCluster(g, crClient, cluster)
     expectedWorkerNodeConditions := util.GetEffectiveNodeConditionsForWorkers(shoot)
     proberShouldBePresent(g, reconciler, cluster, defaultKCMNodeMonitorGracePeriod, expectedWorkerNodeConditions)
     // update the workers
-    updatedWorkerNodeConditions := map[string][]string{testutil.Worker1Name: {testutil.NodeConditionPIDPressure, testutil.NodeConditionNetworkReady}}
-    updatedWorkers := testutil.CreateWorkers([]string{testutil.Worker1Name}, updatedWorkerNodeConditions)
-    shoot.Spec.Provider.Workers = updatedWorkers
+    updatedWorkerNodeConditions := []string{testutil.NodeConditionPIDPressure, testutil.NodeConditionNetworkReady}
+    shoot.Spec.Provider.Workers[0].MachineControllerManagerSettings.NodeConditions = updatedWorkerNodeConditions
     cluster.Spec.Shoot = runtime.RawExtension{
         Object: shoot,
     }
     updateCluster(g, crClient, cluster)
-    proberShouldBePresent(g, reconciler, cluster, defaultKCMNodeMonitorGracePeriod, updatedWorkerNodeConditions)
+    expectedWorkerNodeConditions = util.GetEffectiveNodeConditionsForWorkers(shoot)
+    proberShouldBePresent(g, reconciler, cluster, defaultKCMNodeMonitorGracePeriod, expectedWorkerNodeConditions)
     deleteClusterAndCheckIfProberRemoved(g, crClient, reconciler, cluster)
 }

@@ -231,7 +232,7 @@ func updateShootHibernationSpec(g *WithT, crClient client.Client, cluster *garde
 }
 
 func testShootHibernation(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     createCluster(g, crClient, cluster)
     expectedWorkerNodeConditions := util.GetEffectiveNodeConditionsForWorkers(shoot)
@@ -259,7 +260,7 @@ func updateShootHibernationStatus(g *WithT, crClient client.Client, cluster *gar
 }
 
 func testInvalidShootInClusterSpec(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, _, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, _, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     cluster.Spec.Shoot.Object = nil
     cluster.Spec.Shoot.Raw = []byte(`{"apiVersion": 8}`)
@@ -281,7 +282,7 @@ func updateShootDeletionTimeStamp(g *WithT, crClient client.Client, cluster *gar
 }
 
 func testProberShouldBeRemovedIfDeletionTimeStampIsSet(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     createCluster(g, crClient, cluster)
     expectedWorkerNodeConditions := util.GetEffectiveNodeConditionsForWorkers(shoot)
@@ -314,7 +315,7 @@ func testShootCreationNotComplete(g *WithT, crClient client.Client, reconciler *
 }
 
     for _, testCase := range testCases {
-        cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).WithNodeMonitorGracePeriod(shootKCMNodeMonitorGracePeriod).Build()
+        cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).WithNodeMonitorGracePeriod(shootKCMNodeMonitorGracePeriod).Build()
         g.Expect(err).ToNot(HaveOccurred())
         setShootLastOperationStatus(cluster, shoot, gardencorev1beta1.LastOperationTypeCreate, testCase.lastOpState)
         createCluster(g, crClient, cluster)
@@ -329,7 +330,7 @@ func testShootCreationNotComplete(g *WithT, crClient client.Client, reconciler *
 }
 
 func testShootIsMigrating(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     createCluster(g, crClient, cluster)
     setShootLastOperationStatus(cluster, shoot, gardencorev1beta1.LastOperationTypeMigrate, "")
@@ -339,7 +340,7 @@ func testShootIsMigrating(g *WithT, crClient client.Client, reconciler *Reconcil
 }
 
 func testShootRestoringIsNotComplete(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     createCluster(g, crClient, cluster)
     expectedWorkerNodeConditions := util.GetEffectiveNodeConditionsForWorkers(shoot)
@@ -356,7 +357,7 @@ func testShootRestoringIsNotComplete(g *WithT, crClient client.Client, reconcile
 }
 
 func testLastOperationIsRestoreAndSuccessful(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     setShootLastOperationStatus(cluster, shoot, gardencorev1beta1.LastOperationTypeRestore, gardencorev1beta1.LastOperationStateSucceeded)
     createCluster(g, crClient, cluster)
@@ -366,7 +367,7 @@ func testLastOperationIsRestoreAndSuccessful(g *WithT, crClient client.Client, r
 }
 
 func testLastOperationIsShootReconciliation(g *WithT, crClient client.Client, reconciler *Reconciler) {
-    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerNames([]string{testutil.Worker1Name}).Build()
+    cluster, shoot, err := testutil.NewClusterBuilder().WithWorkerCount(1).Build()
     g.Expect(err).ToNot(HaveOccurred())
     setShootLastOperationStatus(cluster, shoot, gardencorev1beta1.LastOperationTypeReconcile, "")
     createCluster(g, crClient, cluster)
@@ -383,12 +384,10 @@ func testShootHasNoWorkers(g *WithT, crClient client.Client, reconci
 }
 
 func proberShouldBePresent(g *WithT, reconciler *Reconciler, cluster *gardenerv1alpha1.Cluster, expectedKCMNodeMonitorGraceDuration metav1.Duration, expectedWorkerNodeConditions map[string][]string) {
-    g.Eventually(func() int { return len(reconciler.ProberMgr.GetAllProbers()) }, 10*time.Second, 1*time.Second).Should(Equal(1))
-    prober, ok := reconciler.ProberMgr.GetProber(cluster.ObjectMeta.Name)
-    g.Expect(ok).To(BeTrue())
-    g.Expect(*prober.GetConfig().KCMNodeMonitorGraceDuration).To(Equal(expectedKCMNodeMonitorGraceDuration))
-    g.Expect(prober.AreWorkerNodeConditionsStale(expectedWorkerNodeConditions)).To(BeFalse())
-    g.Expect(prober.IsClosed()).To(BeFalse())
+    g.Eventually(func() bool {
+        prober, ok := reconciler.ProberMgr.GetProber(cluster.ObjectMeta.Name)
+        return ok && reflect.DeepEqual(*prober.GetConfig().KCMNodeMonitorGraceDuration, expectedKCMNodeMonitorGraceDuration) && !prober.AreWorkerNodeConditionsStale(expectedWorkerNodeConditions) && !prober.IsClosed()
+    }, 10*time.Second, 1*time.Second).Should(BeTrue())
 }
 
 func proberShouldNotBePresent(g *WithT, reconciler *Reconciler, cluster *gardenerv1alpha1.Cluster) {
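
Aside on the proberShouldBePresent rewrite above: folding the individual assertions into one g.Eventually predicate retries the whole set of checks together until they hold simultaneously, whereas the old form asserted each property exactly once after only the prober count had stabilized and could flake if the reconciler replaced or reconfigured the prober between assertions. A minimal, self-contained sketch of the same Gomega pattern, using a hypothetical registry type in place of the real ProberMgr:

package main

import (
    "reflect"
    "testing"
    "time"

    . "github.com/onsi/gomega"
)

// registry stands in for the prober manager; a reconciler may update its
// entries concurrently, which is why the test polls rather than asserts once.
type registry struct{ entries map[string]time.Duration }

func (r *registry) get(name string) (time.Duration, bool) {
    d, ok := r.entries[name]
    return d, ok
}

func TestPollCombinedPredicate(t *testing.T) {
    g := NewWithT(t)
    r := &registry{entries: map[string]time.Duration{"shoot--test": 40 * time.Second}}
    want := 40 * time.Second
    // A transient mismatch in any single condition triggers another poll
    // instead of failing the test outright.
    g.Eventually(func() bool {
        got, ok := r.get("shoot--test")
        return ok && reflect.DeepEqual(got, want)
    }, 10*time.Second, 1*time.Second).Should(BeTrue())
}
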
44 changes: 22 additions & 22 deletions controllers/cluster/clusterpredicate_test.go
@@ -22,35 +22,35 @@ import (
 func TestCreateAndDeletePredicateFunc(t *testing.T) {
     tests := []struct {
         title          string
-        run            func(g *WithT, workerNames []string) bool
-        workerNames    []string
+        run            func(g *WithT, numWorkers int) bool
+        numWorkers     int
         expectedResult bool
     }{
-        {"test: create predicate func for cluster having workers", testCreatePredicateFunc, []string{test.Worker1Name, test.Worker2Name}, true},
-        {"test: create predicate func for cluster having no workers", testCreatePredicateFunc, nil, false},
-        {"test: delete predicate func for cluster having workers", testDeletePredicateFunc, []string{test.Worker1Name, test.Worker2Name}, true},
-        {"test: delete predicate func for cluster having no workers", testDeletePredicateFunc, nil, false},
+        {"test: create predicate func for cluster having workers", testCreatePredicateFunc, 2, true},
+        {"test: create predicate func for cluster having no workers", testCreatePredicateFunc, 0, false},
+        {"test: delete predicate func for cluster having workers", testDeletePredicateFunc, 2, true},
+        {"test: delete predicate func for cluster having no workers", testDeletePredicateFunc, 0, false},
     }
 
     for _, entry := range tests {
         t.Run(entry.title, func(t *testing.T) {
             g := NewWithT(t)
-            predicateResult := entry.run(g, entry.workerNames)
+            predicateResult := entry.run(g, entry.numWorkers)
             g.Expect(predicateResult).To(Equal(entry.expectedResult))
         })
     }
 }
 
-func testCreatePredicateFunc(g *WithT, workerNames []string) bool {
-    cluster, _, err := test.NewClusterBuilder().WithWorkerNames(workerNames).WithRawShoot(true).Build()
+func testCreatePredicateFunc(g *WithT, workerCount int) bool {
+    cluster, _, err := test.NewClusterBuilder().WithWorkerCount(workerCount).WithRawShoot(true).Build()
     g.Expect(err).ToNot(HaveOccurred())
     e := event.CreateEvent{Object: cluster}
     predicateFuncs := workerLessShoot(logr.Discard())
     return predicateFuncs.Create(e)
 }
 
-func testDeletePredicateFunc(g *WithT, workerNames []string) bool {
-    cluster, _, err := test.NewClusterBuilder().WithWorkerNames(workerNames).WithRawShoot(true).Build()
+func testDeletePredicateFunc(g *WithT, workerCount int) bool {
+    cluster, _, err := test.NewClusterBuilder().WithWorkerCount(workerCount).WithRawShoot(true).Build()
     g.Expect(err).ToNot(HaveOccurred())
     e := event.DeleteEvent{Object: cluster}
     predicateFuncs := workerLessShoot(logr.Discard())
@@ -60,30 +60,30 @@ func testDeletePredicateFunc(g *WithT, workerNames []string) bool {
 func TestUpdatePredicateFunc(t *testing.T) {
     tests := []struct {
         title          string
-        oldWorkerNames []string
-        newWorkerNames []string
+        oldNumWorkers  int
+        newNumWorkers  int
         expectedResult bool
     }{
-        {"test: both old and new cluster do not have workers", nil, nil, false},
-        {"test: old cluster has no workers and new cluster has workers", nil, []string{test.Worker1Name}, true},
-        {"test: old cluster has workers and new cluster do not have workers", []string{test.Worker1Name, test.Worker2Name}, nil, true},
-        {"test: both old and new cluster have workers and there is no change", []string{test.Worker1Name}, []string{test.Worker1Name}, true},
-        {"test: both old and new cluster have workers and are different in number", []string{test.Worker1Name}, []string{test.Worker1Name, test.Worker2Name}, true},
+        {"test: both old and new cluster do not have workers", 0, 0, false},
+        {"test: old cluster has no workers and new cluster has workers", 0, 1, true},
+        {"test: old cluster has workers and new cluster do not have workers", 2, 0, true},
+        {"test: both old and new cluster have workers and there is no change", 1, 1, true},
+        {"test: both old and new cluster have workers and are different in number", 1, 2, true},
     }
 
     for _, entry := range tests {
         t.Run(entry.title, func(t *testing.T) {
             g := NewWithT(t)
-            predicateResult := testUpdatePredicateFunc(g, entry.oldWorkerNames, entry.newWorkerNames)
+            predicateResult := testUpdatePredicateFunc(g, entry.oldNumWorkers, entry.newNumWorkers)
             g.Expect(predicateResult).To(Equal(entry.expectedResult))
         })
     }
 }
 
-func testUpdatePredicateFunc(g *WithT, oldWorkerNames, newWorkerNames []string) bool {
-    oldCluster, _, err := test.NewClusterBuilder().WithWorkerNames(oldWorkerNames).WithRawShoot(true).Build()
+func testUpdatePredicateFunc(g *WithT, oldNumWorkers, newNumWorkers int) bool {
+    oldCluster, _, err := test.NewClusterBuilder().WithWorkerCount(oldNumWorkers).WithRawShoot(true).Build()
     g.Expect(err).ToNot(HaveOccurred())
-    newCluster, _, err := test.NewClusterBuilder().WithWorkerNames(newWorkerNames).WithRawShoot(true).Build()
+    newCluster, _, err := test.NewClusterBuilder().WithWorkerCount(newNumWorkers).WithRawShoot(true).Build()
     g.Expect(err).ToNot(HaveOccurred())
     e := event.UpdateEvent{
         ObjectOld: oldCluster,
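
The predicate tests now size worker pools with a count rather than a list of names. A sketch of what a WithWorkerCount builder method could look like — the real ClusterBuilder lives in internal/test and its details may differ; the types here are simplified stand-ins:

package main

import "fmt"

// Worker is a simplified stand-in for a Gardener worker-pool entry.
type Worker struct{ Name string }

// ClusterBuilder is a stand-in for the test builder in internal/test.
type ClusterBuilder struct{ workers []Worker }

// WithWorkerCount generates count worker pools with synthetic names, so a
// test only states how many pools it needs instead of naming each one.
func (b *ClusterBuilder) WithWorkerCount(count int) *ClusterBuilder {
    for i := 0; i < count; i++ {
        b.workers = append(b.workers, Worker{Name: fmt.Sprintf("worker-%d", i)})
    }
    return b
}

func main() {
    b := (&ClusterBuilder{}).WithWorkerCount(2)
    fmt.Println(len(b.workers), b.workers[0].Name) // 2 worker-0
}
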
6 changes: 4 additions & 2 deletions controllers/endpoint/endpoints_controller_test.go
@@ -12,6 +12,8 @@ import (
     "testing"
     "time"
 
+    "k8s.io/apimachinery/pkg/util/rand"
+
     testutil "github.com/gardener/dependency-watchdog/internal/test"
     "k8s.io/client-go/kubernetes/scheme"
 
@@ -126,7 +128,7 @@ func testWeederSharedEnvTest(t *testing.T) {
 
     for _, test := range tests {
         childCtx, chileCancelFn := context.WithCancel(ctx)
-        testNs := testutil.GenerateRandomAlphanumericString(g, 4)
+        testNs := rand.String(4)
         testutil.CreateTestNamespace(childCtx, g, reconciler.Client, testNs)
         t.Run(test.description, func(t *testing.T) {
             test.run(childCtx, chileCancelFn, g, reconciler, testNs)
@@ -150,7 +152,7 @@ func testWeederDedicatedEnvTest(t *testing.T) {
     for _, test := range tests {
         ctx, cancelFn := context.WithCancel(context.Background())
         testEnv, reconciler := setupWeederEnv(ctx, t, test.apiServerFlags)
-        testNs := testutil.GenerateRandomAlphanumericString(g, 4)
+        testNs := rand.String(4)
         testutil.CreateTestNamespace(ctx, g, reconciler.Client, testNs)
         t.Run(test.description, func(t *testing.T) {
             test.run(ctx, cancelFn, g, reconciler, testNs)
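
Both weeder tests swap the project's own GenerateRandomAlphanumericString helper for rand.String from k8s.io/apimachinery/pkg/util/rand, which returns a random string of the requested length from a fixed alphanumeric alphabet and needs neither a Gomega handle nor error handling. Usage is a one-liner:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/rand"
)

func main() {
    // A 4-character random suffix is enough to keep namespaces unique
    // within a single envtest run.
    testNs := rand.String(4)
    fmt.Println("test namespace:", testNs)
}
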
7 changes: 5 additions & 2 deletions internal/prober/errors/errors.go
@@ -21,8 +21,11 @@ const (
 // ProbeError is the error type for probe errors. It contains the error code, the cause of the error, and the error message.
 // It is used by prober to record its last error and is currently only used for unit tests.
 type ProbeError struct {
-    Code    ErrorCode
-    Cause   error
+    // Code is the error code that is returned by the probe.
+    Code ErrorCode
+    // Cause is the error that happened during the probe.
+    Cause error
+    // Message is used for mentioning additional details describing the error.
     Message string
 }

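With its fields documented, ProbeError is a small self-describing error type. A sketch of how a caller might construct and surface one — the ErrorCode value and the Error() method below are illustrative assumptions, not copied from the package:

package main

import (
    "errors"
    "fmt"
)

// ErrorCode mirrors the documented field type; the concrete codes live in
// internal/prober/errors.
type ErrorCode string

type ProbeError struct {
    Code    ErrorCode
    Cause   error
    Message string
}

// Error renders code, message, and cause on one line (assumed formatting).
func (e *ProbeError) Error() string {
    return fmt.Sprintf("%s: %s: %v", e.Code, e.Message, e.Cause)
}

func main() {
    cause := errors.New("lease list timed out")
    err := &ProbeError{Code: "ErrProbeNodeLease", Cause: cause, Message: "Failed to probe node leases"}
    fmt.Println(err)
}
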
9 changes: 5 additions & 4 deletions internal/prober/fakes/k8s/client.go
@@ -116,10 +116,11 @@ func (c *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...c
 
 func (c *fakeClient) getRecordedObjectCollectionError(method ClientMethod, namespace string, labelSelector labels.Selector, objGVK schema.GroupVersionKind) error {
     for _, errRecord := range c.errorRecords {
-        if errRecord.method == method && errRecord.resourceNamespace == namespace {
-            if errRecord.resourceGVK == objGVK || (labelSelector == nil && errRecord.labels == nil) || labelSelector.Matches(errRecord.labels) {
-                return errRecord.err
-            }
+        if errRecord.method == method && errRecord.resourceNamespace == namespace &&
+            (errRecord.resourceGVK == objGVK || // if the GVK is set, we need to match it
+                (labelSelector == nil && errRecord.labels == nil) || // if there is no label selector and no labels in the error record
+                labelSelector.Matches(errRecord.labels)) { // if there is a label selector, and it matches the labels in the error record
+            return errRecord.err
         }
     }
     return nil
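
The combined condition in getRecordedObjectCollectionError leans on Go's left-to-right short-circuit evaluation of ||: Matches only runs when the GVK comparison and the nil-selector/nil-labels disjunct are both false. A standalone illustration of that ordering, with simplified stand-in types:

package main

import "fmt"

// selector is a stand-in for labels.Selector.
type selector interface{ Matches(labels map[string]string) bool }

type everything struct{}

func (everything) Matches(map[string]string) bool { return true }

// matches mirrors the shape of the combined condition: || stops at the
// first true operand, so Matches is only reached as a last resort.
func matches(gvkEqual bool, sel selector, recLabels map[string]string) bool {
    return gvkEqual ||
        (sel == nil && recLabels == nil) ||
        sel.Matches(recLabels)
}

func main() {
    fmt.Println(matches(true, nil, nil))  // true: first operand short-circuits
    fmt.Println(matches(false, nil, nil)) // true: nil/nil operand, Matches never runs
    fmt.Println(matches(false, everything{}, map[string]string{"a": "b"})) // true: Matches runs
}
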
1 change: 1 addition & 0 deletions internal/prober/prober.go
@@ -111,6 +111,7 @@ func (p *Prober) probe(ctx context.Context) {
     candidateNodeLeases, err := p.probeNodeLeases(ctx, shootClient)
     if err != nil {
         p.recordError(err, errors.ErrProbeNodeLease, "Failed to probe node leases")
+        p.l.Error(err, "Failed to probe node leases, ignoring error, probe will be re-attempted")
         return
     }
     if len(candidateNodeLeases) != 1 {
4 changes: 3 additions & 1 deletion internal/prober/shoot/client_test.go
@@ -12,6 +12,8 @@ import (
     "testing"
     "time"
 
+    "k8s.io/apimachinery/pkg/util/rand"
+
     "github.com/gardener/dependency-watchdog/internal/prober/fakes/k8s"
     corev1 "k8s.io/api/core/v1"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -48,7 +50,7 @@ func TestSuite(t *testing.T) {
     t.Run(tc.description, func(t *testing.T) {
         ctx := context.Background()
         k8sClient := k8s.NewFakeClientBuilder().Build()
-        testNs := test.GenerateRandomAlphanumericString(g, 4)
+        testNs := rand.String(4)
         test.CreateTestNamespace(ctx, g, k8sClient, testNs)
         tc.run(ctx, t, testNs, k8sClient)
     })