Merge pull request #11252 from fabriziopandini/improve-machine-controller-unit-tests

🌱 Improve unit tests for Machine controller
k8s-ci-robot authored Oct 8, 2024
2 parents 265e672 + e678f88 commit 48ee02a
Showing 3 changed files with 620 additions and 243 deletions.
155 changes: 155 additions & 0 deletions internal/controllers/machine/machine_controller_noderef_test.go
@@ -26,6 +26,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -34,13 +35,167 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/api/v1beta1/index"
"sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/internal/test/builder"
"sigs.k8s.io/cluster-api/internal/topology/ownerrefs"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/kubeconfig"
)

func TestReconcileNode(t *testing.T) {
defaultMachine := clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-test",
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{
clusterv1.ClusterNameLabel: "test-cluster",
},
},
Spec: clusterv1.MachineSpec{
ProviderID: ptr.To("aws://us-east-1/test-node-1"),
},
}

defaultCluster := &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: metav1.NamespaceDefault,
},
}

testCases := []struct {
name string
machine *clusterv1.Machine
node *corev1.Node
nodeGetErr bool
expectResult ctrl.Result
expectError bool
expected func(g *WithT, m *clusterv1.Machine)
}{
{
name: "No op if provider ID is not set",
machine: &clusterv1.Machine{},
node: nil,
nodeGetErr: false,
expectResult: ctrl.Result{},
expectError: false,
},
{
name: "err reading node (something different than not found), it should return error",
machine: defaultMachine.DeepCopy(),
node: nil,
nodeGetErr: true,
expectResult: ctrl.Result{},
expectError: true,
},
{
name: "waiting for the node to exist, no op",
machine: defaultMachine.DeepCopy(),
node: nil,
nodeGetErr: false,
expectResult: ctrl.Result{},
expectError: false,
},
{
name: "node found, should surface info",
machine: defaultMachine.DeepCopy(),
node: &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "test-node-1",
},
Spec: corev1.NodeSpec{
ProviderID: "aws://us-east-1/test-node-1",
},
Status: corev1.NodeStatus{
NodeInfo: corev1.NodeSystemInfo{
MachineID: "foo",
},
Addresses: []corev1.NodeAddress{
{
Type: corev1.NodeInternalIP,
Address: "1.1.1.1",
},
{
Type: corev1.NodeInternalIP,
Address: "2.2.2.2",
},
},
},
},
nodeGetErr: false,
expectResult: ctrl.Result{},
expectError: false,
expected: func(g *WithT, m *clusterv1.Machine) {
g.Expect(m.Status.NodeRef).ToNot(BeNil())
g.Expect(m.Status.NodeRef.Name).To(Equal("test-node-1"))
g.Expect(m.Status.NodeInfo).ToNot(BeNil())
g.Expect(m.Status.NodeInfo.MachineID).To(Equal("foo"))
},
},
{
name: "node not found when already seen, should error",
machine: &clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "machine-test",
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{
clusterv1.ClusterNameLabel: "test-cluster",
},
},
Spec: clusterv1.MachineSpec{
ProviderID: ptr.To("aws://us-east-1/test-node-1"),
},
Status: clusterv1.MachineStatus{
NodeRef: &corev1.ObjectReference{
Kind: "Node",
Name: "test-node-1",
APIVersion: "v1",
},
},
},
node: nil,
nodeGetErr: false,
expectResult: ctrl.Result{},
expectError: true,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
g := NewWithT(t)

c := fake.NewClientBuilder().WithObjects(tc.machine).WithIndex(&corev1.Node{}, "spec.providerID", index.NodeByProviderID).Build()
if tc.nodeGetErr {
c = fake.NewClientBuilder().WithObjects(tc.machine).Build() // no "spec.providerID" index: listing nodes by provider ID fails with an error rather than returning "not found"
}

if tc.node != nil {
g.Expect(c.Create(ctx, tc.node)).To(Succeed())
defer func() { _ = c.Delete(ctx, tc.node) }()
}

r := &Reconciler{
Tracker: remote.NewTestClusterCacheTracker(ctrl.Log, c, c, fakeScheme, client.ObjectKeyFromObject(defaultCluster)),
Client: c,
recorder: record.NewFakeRecorder(10),
}
s := &scope{cluster: defaultCluster, machine: tc.machine}
result, err := r.reconcileNode(ctx, s)
g.Expect(result).To(BeComparableTo(tc.expectResult))
if tc.expectError {
g.Expect(err).To(HaveOccurred())
} else {
g.Expect(err).ToNot(HaveOccurred())
}

if tc.expected != nil {
tc.expected(g, tc.machine)
}
})
}
}
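
The nodeGetErr cases above hinge on a detail of controller-runtime's fake client: reconcileNode resolves Nodes through the field index on spec.providerID, so a client built without that index fails the lookup with a genuine error instead of a not-found. A minimal, self-contained sketch of the pattern, with nodeByProviderID standing in for cluster-api's index.NodeByProviderID and an illustrative node name and provider ID:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// nodeByProviderID extracts the index value, mirroring what
// cluster-api's index.NodeByProviderID does.
func nodeByProviderID(o client.Object) []string {
	node := o.(*corev1.Node)
	if node.Spec.ProviderID == "" {
		return nil
	}
	return []string{node.Spec.ProviderID}
}

func main() {
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "test-node-1"},
		Spec:       corev1.NodeSpec{ProviderID: "aws://us-east-1/test-node-1"},
	}

	// With the index registered, listing by spec.providerID succeeds.
	withIndex := fake.NewClientBuilder().
		WithObjects(node).
		WithIndex(&corev1.Node{}, "spec.providerID", nodeByProviderID).
		Build()
	nodes := &corev1.NodeList{}
	err := withIndex.List(context.Background(), nodes,
		client.MatchingFields{"spec.providerID": "aws://us-east-1/test-node-1"})
	fmt.Println(len(nodes.Items), err) // 1 <nil>

	// Without the index, the same List call returns an error, which is
	// what the nodeGetErr cases use to simulate a failed node read.
	noIndex := fake.NewClientBuilder().WithObjects(node).Build()
	err = noIndex.List(context.Background(), nodes,
		client.MatchingFields{"spec.providerID": "aws://us-east-1/test-node-1"})
	fmt.Println(err != nil) // true
}

Driving the error out of the client machinery itself keeps the test table simple: no custom client wrapper or error-injecting reactor is needed.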

func TestGetNode(t *testing.T) {
g := NewWithT(t)

34 changes: 13 additions & 21 deletions internal/controllers/machine/machine_controller_phases.go
@@ -32,6 +32,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/external"
@@ -95,10 +96,9 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C
if err != nil {
return external.ReconcileOutput{}, err
}
- if result.RequeueAfter > 0 || result.Paused {
+ if result.RequeueAfter > 0 {
return result, nil
}
-
obj := result.Result

// Set failure reason and message, if any.
@@ -139,12 +139,6 @@ func (r *Reconciler) ensureExternalOwnershipAndWatch(ctx context.Context, cluste
return external.ReconcileOutput{}, err
}

- // if external ref is paused, return error.
- if annotations.IsPaused(cluster, obj) {
- 	log.V(3).Info("External object referenced is paused")
- 	return external.ReconcileOutput{Paused: true}, nil
- }
-
// Initialize the patch helper.
patchHelper, err := patch.NewHelper(obj, r.Client)
if err != nil {
@@ -176,6 +170,9 @@ func (r *Reconciler) ensureExternalOwnershipAndWatch(ctx context.Context, cluste
return external.ReconcileOutput{}, err
}

+ if annotations.IsPaused(cluster, obj) {
+ 	return external.ReconcileOutput{Result: obj, Paused: true}, nil
+ }
return external.ReconcileOutput{Result: obj}, nil
}
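
For context, the paused check that moved to the end of ensureExternalOwnershipAndWatch keys off two signals: the owning Cluster's spec.paused field and the cluster.x-k8s.io/paused annotation on the referenced object. A simplified stand-in for what annotations.IsPaused evaluates, not the upstream implementation:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// isPausedSketch approximates util/annotations.IsPaused: an object counts
// as paused when its Cluster is paused or when the object itself carries
// the paused annotation.
func isPausedSketch(cluster *clusterv1.Cluster, obj metav1.Object) bool {
	if cluster.Spec.Paused {
		return true
	}
	_, hasAnnotation := obj.GetAnnotations()[clusterv1.PausedAnnotation]
	return hasAnnotation
}

func main() {
	cluster := &clusterv1.Cluster{Spec: clusterv1.ClusterSpec{Paused: true}}
	fmt.Println(isPausedSketch(cluster, &clusterv1.Machine{})) // true
}

Returning Result: obj alongside Paused: true means callers now receive the fetched object even when it is paused, instead of an empty output.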

@@ -197,11 +194,6 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, s *scope) (ctrl.Res
}
s.bootstrapConfig = externalResult.Result

- // If the external object is paused return.
- if externalResult.Paused {
- 	return ctrl.Result{}, nil
- }
-
if externalResult.RequeueAfter > 0 {
return ctrl.Result{RequeueAfter: externalResult.RequeueAfter}, nil
}
@@ -271,14 +263,11 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr
m.Status.FailureReason = ptr.To(capierrors.InvalidConfigurationMachineError)
m.Status.FailureMessage = ptr.To(fmt.Sprintf("Machine infrastructure resource %v with name %q has been deleted after being ready",
m.Spec.InfrastructureRef.GroupVersionKind(), m.Spec.InfrastructureRef.Name))
- return ctrl.Result{}, errors.Errorf("could not find %v %q for Machine %q in namespace %q, requeuing", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, m.Name, m.Namespace)
+ return ctrl.Result{}, reconcile.TerminalError(errors.Errorf("could not find %v %q for Machine %q in namespace %q", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, m.Name, m.Namespace))
}
return ctrl.Result{RequeueAfter: infraReconcileResult.RequeueAfter}, nil
}
- // if the external object is paused, return without any further processing
- if infraReconcileResult.Paused {
- 	return ctrl.Result{}, nil
- }
+
infraConfig := infraReconcileResult.Result

if !infraConfig.GetDeletionTimestamp().IsZero() {
@@ -293,16 +282,15 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr
if ready && !m.Status.InfrastructureReady {
log.Info("Infrastructure provider has completed machine infrastructure provisioning and reports status.ready", infraConfig.GetKind(), klog.KObj(infraConfig))
}
- m.Status.InfrastructureReady = ready

// Report a summary of current status of the infrastructure object defined for this machine.
conditions.SetMirror(m, clusterv1.InfrastructureReadyCondition,
conditions.UnstructuredGetter(infraConfig),
conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
)

- // If the infrastructure provider is not ready, return early.
- if !ready {
+ // If the infrastructure provider is not ready (and it wasn't ready before), return early.
+ if !ready && !m.Status.InfrastructureReady {
log.Info("Waiting for infrastructure provider to create machine infrastructure and report status.ready", infraConfig.GetKind(), klog.KObj(infraConfig))
return ctrl.Result{}, nil
}
@@ -332,7 +320,11 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr
m.Spec.FailureDomain = ptr.To(failureDomain)
}

+ // When we hit this point provider id is set, and either:
+ // - the infra machine is reporting ready for the first time
+ // - the infra machine already reported ready (and thus m.Status.InfrastructureReady is already true and it should not flip back)
m.Spec.ProviderID = ptr.To(providerID)
+ m.Status.InfrastructureReady = true
return ctrl.Result{}, nil
}
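
The switch from a plain error to reconcile.TerminalError in reconcileInfrastructure changes retry behavior: controller-runtime recognizes terminal errors via errors.Is and does not requeue them, though they still count toward error metrics. A small sketch of that contract, assuming controller-runtime v0.15 or newer and an illustrative message:

package main

import (
	"errors"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func main() {
	// A plain error returned from Reconcile is retried with backoff.
	plain := errors.New("could not find infra machine")

	// Wrapping it marks it terminal; the controller detects this with
	// errors.Is(err, reconcile.TerminalError(nil)) and skips the requeue.
	terminal := reconcile.TerminalError(plain)

	fmt.Println(errors.Is(terminal, reconcile.TerminalError(nil))) // true
	fmt.Println(errors.Is(plain, reconcile.TerminalError(nil)))    // false

	// The wrapped error stays reachable for logs and metrics.
	fmt.Println(errors.Unwrap(terminal)) // could not find infra machine
}

That fits the case here: a Machine whose infrastructure object was deleted after being ready cannot recover, so requeuing (as the old "requeuing" message promised) would only retry a permanent failure.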

