diff --git a/config/crd/bases/anywhere.eks.amazonaws.com_controlplaneupgrades.yaml b/config/crd/bases/anywhere.eks.amazonaws.com_controlplaneupgrades.yaml index a68e2ee9f019..dcf6e1a3ce7b 100644 --- a/config/crd/bases/anywhere.eks.amazonaws.com_controlplaneupgrades.yaml +++ b/config/crd/bases/anywhere.eks.amazonaws.com_controlplaneupgrades.yaml @@ -89,10 +89,6 @@ spec: upgraded: format: int64 type: integer - required: - - ready - - requireUpgrade - - upgraded type: object type: object served: true diff --git a/controllers/controlplaneupgrade_controller.go b/controllers/controlplaneupgrade_controller.go index 2f5407d46a44..5c0e7cbbfd9c 100644 --- a/controllers/controlplaneupgrade_controller.go +++ b/controllers/controlplaneupgrade_controller.go @@ -18,23 +18,43 @@ package controllers import ( "context" + "fmt" + "time" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/constants" +) + +// controlPlaneUpgradeFinalizerName is the finalizer added to ControlPlaneUpgrade objects to handle deletion. +const ( + controlPlaneUpgradeFinalizerName = "controlplaneupgrades.anywhere.eks.amazonaws.com/finalizer" + // TODO(in-place): Fetch these versions dynamically from the bundle instead of using the hardcoded one. + kubernetesVersion = "v1.28.3-eks-1-28-9" + etcdVersion = "v3.5.9-eks-1-28-9" ) // ControlPlaneUpgradeReconciler reconciles a ControlPlaneUpgrade object.
type ControlPlaneUpgradeReconciler struct { client client.Client + log logr.Logger } // NewControlPlaneUpgradeReconciler returns a new instance of ControlPlaneUpgradeReconciler. func NewControlPlaneUpgradeReconciler(client client.Client) *ControlPlaneUpgradeReconciler { return &ControlPlaneUpgradeReconciler{ client: client, + log: ctrl.Log.WithName("ControlPlaneUpgradeController"), } } @@ -43,12 +63,61 @@ func NewControlPlaneUpgradeReconciler(client client.Client) *ControlPlaneUpgrade //+kubebuilder:rbac:groups=anywhere.eks.amazonaws.com,resources=controlplaneupgrades/finalizers,verbs=update // Reconcile reconciles a ControlPlaneUpgrade object. -func (r *ControlPlaneUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) +// nolint:gocyclo +func (r *ControlPlaneUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) { + log := r.log.WithValues("ControlPlaneUpgrade", req.NamespacedName) - // TODO(user): your logic here + log.Info("Reconciling ControlPlaneUpgrade object") + cpUpgrade := &anywherev1.ControlPlaneUpgrade{} + if err := r.client.Get(ctx, req.NamespacedName, cpUpgrade); err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, err + } + return ctrl.Result{}, err + } - return ctrl.Result{}, nil + patchHelper, err := patch.NewHelper(cpUpgrade, r.client) + if err != nil { + return ctrl.Result{}, err + } + + defer func() { + err := r.updateStatus(ctx, log, cpUpgrade) + if err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + + // Always attempt to patch the object and status after each reconciliation. + patchOpts := []patch.Option{} + + // We want the observedGeneration to indicate that the status shown is up-to-date given the desired spec of the same generation.
+ // However, if there is an error while updating the status, we may get a partial status update. In this case, + // a partially updated status is not considered up to date, so we should not update the observedGeneration + + // Patch ObservedGeneration only if the reconciliation completed without error + if reterr == nil { + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + } + // Always attempt to patch the object and status after each reconciliation. + if err := patchHelper.Patch(ctx, cpUpgrade, patchOpts...); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + + // Only requeue if we are not already re-queueing and the Cluster ready condition is false. + // We do this to be able to update the status continuously until the cluster becomes ready, + // since there might be changes in state of the world that don't trigger reconciliation requests + + if reterr == nil && !result.Requeue && result.RequeueAfter <= 0 && !cpUpgrade.Status.Ready { + result = ctrl.Result{RequeueAfter: 10 * time.Second} + } + }() + + // Reconcile the ControlPlaneUpgrade deletion if the DeletionTimestamp is set. + if !cpUpgrade.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, log, cpUpgrade) + } + controllerutil.AddFinalizer(cpUpgrade, controlPlaneUpgradeFinalizerName) + return r.reconcile(ctx, log, cpUpgrade) } // SetupWithManager sets up the controller with the Manager. @@ -57,3 +126,109 @@ func (r *ControlPlaneUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error For(&anywherev1.ControlPlaneUpgrade{}).
Complete(r) } + +func (r *ControlPlaneUpgradeReconciler) reconcile(ctx context.Context, log logr.Logger, cpUpgrade *anywherev1.ControlPlaneUpgrade) (ctrl.Result, error) { + var firstControlPlane bool + // return early if controlplane upgrade is already complete + if cpUpgrade.Status.Ready { + log.Info("All Control Plane nodes are upgraded") + return ctrl.Result{}, nil + } + + log.Info("Upgrading all Control Plane nodes") + + for idx, machineRef := range cpUpgrade.Spec.MachinesRequireUpgrade { + firstControlPlane = idx == 0 + nodeUpgrade := nodeUpgrader(machineRef, kubernetesVersion, etcdVersion, firstControlPlane) + if err := r.client.Get(ctx, GetNamespacedNameType(nodeUpgraderName(machineRef.Name), constants.EksaSystemNamespace), nodeUpgrade); err != nil { + if apierrors.IsNotFound(err) { + if err := r.client.Create(ctx, nodeUpgrade); client.IgnoreAlreadyExists(err) != nil { + return ctrl.Result{}, fmt.Errorf("failed to create node upgrader for machine %s: %v", machineRef.Name, err) + } + return ctrl.Result{}, nil + } + return ctrl.Result{}, fmt.Errorf("getting node upgrader for machine %s: %v", machineRef.Name, err) + } + if !nodeUpgrade.Status.Completed { + return ctrl.Result{}, nil + } + } + return ctrl.Result{}, nil +} + +// nodeUpgraderName returns the name of the node upgrade object based on the machine reference.
+func nodeUpgraderName(machineRefName string) string { + return fmt.Sprintf("%s-node-upgrader", machineRefName) +} + +func nodeUpgrader(machineRef anywherev1.Ref, kubernetesVersion, etcdVersion string, firstControlPlane bool) *anywherev1.NodeUpgrade { + return &anywherev1.NodeUpgrade{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeUpgraderName(machineRef.Name), + Namespace: constants.EksaSystemNamespace, + }, + Spec: anywherev1.NodeUpgradeSpec{ + Machine: corev1.ObjectReference{ + Kind: machineRef.Kind, + Namespace: constants.EksaSystemNamespace, + Name: machineRef.Name, + }, + KubernetesVersion: kubernetesVersion, + EtcdVersion: &etcdVersion, + FirstNodeToBeUpgraded: firstControlPlane, + }, + } +} + +func (r *ControlPlaneUpgradeReconciler) reconcileDelete(ctx context.Context, log logr.Logger, cpUpgrade *anywherev1.ControlPlaneUpgrade) (ctrl.Result, error) { + log.Info("Reconcile ControlPlaneUpgrade deletion") + + for _, machineRef := range cpUpgrade.Spec.MachinesRequireUpgrade { + nodeUpgrade := &anywherev1.NodeUpgrade{} + if err := r.client.Get(ctx, GetNamespacedNameType(nodeUpgraderName(machineRef.Name), constants.EksaSystemNamespace), nodeUpgrade); err != nil { + if apierrors.IsNotFound(err) { + log.Info("Node Upgrader not found, skipping node upgrade deletion") + } else { + return ctrl.Result{}, fmt.Errorf("getting node upgrader for machine %s: %v", machineRef.Name, err) + } + } else { + log.Info("Deleting node upgrader", "Machine", machineRef.Name) + if err := r.client.Delete(ctx, nodeUpgrade); err != nil { + return ctrl.Result{}, fmt.Errorf("deleting node upgrader: %v", err) + } + } + } + + // Remove the finalizer on ControlPlaneUpgrade object + controllerutil.RemoveFinalizer(cpUpgrade, controlPlaneUpgradeFinalizerName) + return ctrl.Result{}, nil +} + +func (r *ControlPlaneUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logger, cpUpgrade *anywherev1.ControlPlaneUpgrade) error { + // When ControlPlaneUpgrade is fully deleted, we do not need to
update the status. Without this check + // the subsequent patch operations would fail if the status is updated after it is fully deleted. + + if !cpUpgrade.DeletionTimestamp.IsZero() && len(cpUpgrade.GetFinalizers()) == 0 { + log.Info("ControlPlaneUpgrade is deleted, skipping status update") + return nil + } + + log.Info("Updating ControlPlaneUpgrade status") + nodeUpgrade := &anywherev1.NodeUpgrade{} + nodesUpgradeCompleted := 0 + nodesUpgradeRequired := len(cpUpgrade.Spec.MachinesRequireUpgrade) + for _, machineRef := range cpUpgrade.Spec.MachinesRequireUpgrade { + if err := r.client.Get(ctx, GetNamespacedNameType(nodeUpgraderName(machineRef.Name), constants.EksaSystemNamespace), nodeUpgrade); err != nil { + return fmt.Errorf("getting node upgrader for machine %s: %v", machineRef.Name, err) + } + if nodeUpgrade.Status.Completed { + nodesUpgradeCompleted++ + nodesUpgradeRequired-- + } + } + log.Info("Control Plane Nodes ready", "total", cpUpgrade.Status.Upgraded, "need-upgrade", cpUpgrade.Status.RequireUpgrade) + cpUpgrade.Status.Upgraded = int64(nodesUpgradeCompleted) + cpUpgrade.Status.RequireUpgrade = int64(nodesUpgradeRequired) + cpUpgrade.Status.Ready = nodesUpgradeRequired == 0 + return nil +} diff --git a/controllers/controlplaneupgrade_controller_test.go b/controllers/controlplaneupgrade_controller_test.go new file mode 100644 index 000000000000..6cf6af6b05ad --- /dev/null +++ b/controllers/controlplaneupgrade_controller_test.go @@ -0,0 +1,253 @@ +package controllers_test + +import ( + "context" + "fmt" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/aws/eks-anywhere/controllers" + anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1" +) + +func TestCPUpgradeReconcile(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: true, + } + } + + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0], nodeUpgrades[1]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + + cpu := &anywherev1.ControlPlaneUpgrade{} + err = client.Get(ctx, types.NamespacedName{Name: cpUpgrade.Name, Namespace: "eksa-system"}, cpu) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestCPUpgradeReconcileEarly(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: true, + } + } + cpUpgrade.Status.Ready = true + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0], nodeUpgrades[1]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := 
controllers.NewControlPlaneUpgradeReconciler(client) + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + + cpu := &anywherev1.ControlPlaneUpgrade{} + err = client.Get(ctx, types.NamespacedName{Name: cpUpgrade.Name, Namespace: "eksa-system"}, cpu) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestCPUpgradeReconcileNodeNotUpgraded(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: false, + } + } + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0], nodeUpgrades[1]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + + cpu := &anywherev1.ControlPlaneUpgrade{} + err = client.Get(ctx, types.NamespacedName{Name: cpUpgrade.Name, Namespace: "eksa-system"}, cpu) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestCPUpgradeReconcileNodeUpgradeError(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster, machines, nodes, cpUpgrade, _ := getObjectsForCPUpgradeTest() + + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError("getting node upgrader for machine machine02: nodeupgrades.anywhere.eks.amazonaws.com \"machine02-node-upgrader\" not found")) +} + +func 
TestCPUpgradeReconcileNodeUpgraderCreate(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: true, + } + } + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + cpu := &anywherev1.ControlPlaneUpgrade{} + err = client.Get(ctx, types.NamespacedName{Name: cpUpgrade.Name, Namespace: "eksa-system"}, cpu) + g.Expect(err).ToNot(HaveOccurred()) +} + +func TestCPUpgradeReconcileNodesNotReadyYet(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: true, + } + } + cpUpgrade.Status = anywherev1.ControlPlaneUpgradeStatus{ + Upgraded: 0, + RequireUpgrade: 2, + } + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0], nodeUpgrades[1]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cpUpgrade.Status.Ready).To(BeFalse()) +} + +func TestCPUpgradeReconcileDelete(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + now := metav1.Now() + + cluster, machines, nodes, cpUpgrade, 
nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: true, + } + } + cpUpgrade.DeletionTimestamp = &now + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0], nodeUpgrades[1]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).ToNot(HaveOccurred()) + + n := &anywherev1.NodeUpgrade{} + err = client.Get(ctx, types.NamespacedName{Name: nodeUpgrades[0].Name, Namespace: "eksa-system"}, n) + g.Expect(err).To(MatchError("nodeupgrades.anywhere.eks.amazonaws.com \"machine01-node-upgrader\" not found")) +} + +func TestCPUpgradeObjectDoesNotExist(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest() + for i := range nodeUpgrades { + nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name) + nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{ + Completed: true, + } + } + objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], nodeUpgrades[0], nodeUpgrades[1]} + client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() + r := controllers.NewControlPlaneUpgradeReconciler(client) + + req := cpUpgradeRequest(cpUpgrade) + _, err := r.Reconcile(ctx, req) + g.Expect(err).To(MatchError("controlplaneupgrades.anywhere.eks.amazonaws.com \"cp-upgrade-request\" not found")) +} + +func getObjectsForCPUpgradeTest() (*clusterv1.Cluster, []*clusterv1.Machine, []*corev1.Node, *anywherev1.ControlPlaneUpgrade, []*anywherev1.NodeUpgrade) { + cluster := generateCluster() + node1 := generateNode() + node2 := node1.DeepCopy() + node2.ObjectMeta.Name = "node02" + 
machine1 := generateMachine(cluster, node1) + machine2 := generateMachine(cluster, node2) + machine2.ObjectMeta.Name = "machine02" + nodeUpgrade1 := generateNodeUpgrade(machine1) + nodeUpgrade2 := generateNodeUpgrade(machine2) + nodeUpgrade2.ObjectMeta.Name = "node-upgrade-request-2" + machines := []*clusterv1.Machine{machine1, machine2} + nodes := []*corev1.Node{node1, node2} + nodeUpgrades := []*anywherev1.NodeUpgrade{nodeUpgrade1, nodeUpgrade2} + cpUpgrade := generateCPUpgrade(machines, cluster) + return cluster, machines, nodes, cpUpgrade, nodeUpgrades +} + +func cpUpgradeRequest(cpUpgrade *anywherev1.ControlPlaneUpgrade) reconcile.Request { + return reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: cpUpgrade.Name, + Namespace: cpUpgrade.Namespace, + }, + } +} + +func generateCPUpgrade(machine []*clusterv1.Machine, cluster *clusterv1.Cluster) *anywherev1.ControlPlaneUpgrade { + etcdVersion := "v1.28.3-eks-1-28-9" + return &anywherev1.ControlPlaneUpgrade{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cp-upgrade-request", + Namespace: "eksa-system", + }, + Spec: anywherev1.ControlPlaneUpgradeSpec{ + Cluster: anywherev1.Ref{ + Name: cluster.Name, + Kind: "Cluster", + }, + ControlPlane: anywherev1.Ref{ + Name: "my-cp", + Kind: "KubeadmControlPlane", + }, + MachinesRequireUpgrade: []anywherev1.Ref{ + { + Name: machine[0].Name, + Kind: "Machine", + }, + { + Name: machine[1].Name, + Kind: "Machine", + }, + }, + KubernetesVersion: "v1.28.1", + KubeletVersion: "v1.28.1", + EtcdVersion: &etcdVersion, + KubeadmClusterConfig: "", + }, + } +} diff --git a/controllers/nodeupgrade_controller.go b/controllers/nodeupgrade_controller.go index 8019c504c571..d8e0dc4f84ed 100644 --- a/controllers/nodeupgrade_controller.go +++ b/controllers/nodeupgrade_controller.go @@ -26,7 +26,7 @@ import ( const ( // TODO(in-place): Get this image from the bundle instead of using the hardcoded one. 
- defaultUpgraderImage = "public.ecr.aws/t0n3a9y4/aws/upgrader:v1.28.3-eks-1-28-9" + defaultUpgraderImage = "public.ecr.aws/t2p4l7v3/upgrader:eksdbase" controlPlaneLabel = "node-role.kubernetes.io/control-plane" podDNEMessage = "Upgrader pod does not exist" @@ -86,11 +86,11 @@ func (r *NodeUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) } machineToBeUpgraded := &clusterv1.Machine{} - if err := r.client.Get(ctx, getNamespacedNameType(nodeUpgrade.Spec.Machine.Name, nodeUpgrade.Spec.Machine.Namespace), machineToBeUpgraded); err != nil { + if err := r.client.Get(ctx, GetNamespacedNameType(nodeUpgrade.Spec.Machine.Name, nodeUpgrade.Spec.Machine.Namespace), machineToBeUpgraded); err != nil { return ctrl.Result{}, err } - rClient, err := r.remoteClientRegistry.GetClient(ctx, getNamespacedNameType(machineToBeUpgraded.Spec.ClusterName, machineToBeUpgraded.Namespace)) + rClient, err := r.remoteClientRegistry.GetClient(ctx, GetNamespacedNameType(machineToBeUpgraded.Spec.ClusterName, machineToBeUpgraded.Namespace)) if err != nil { return ctrl.Result{}, err } @@ -352,7 +352,8 @@ func isControlPlane(node *corev1.Node) bool { return ok } -func getNamespacedNameType(name, namespace string) types.NamespacedName { +// GetNamespacedNameType takes name and namespace and returns NamespacedName in namespace/name format. 
+func GetNamespacedNameType(name, namespace string) types.NamespacedName { return types.NamespacedName{ Name: name, Namespace: namespace, @@ -384,7 +385,7 @@ func upgraderPodExists(ctx context.Context, remoteClient client.Client, nodeName func getUpgraderPod(ctx context.Context, remoteClient client.Client, nodeName string) (*corev1.Pod, error) { pod := &corev1.Pod{} - if err := remoteClient.Get(ctx, getNamespacedNameType(upgrader.PodName(nodeName), constants.EksaSystemNamespace), pod); err != nil { + if err := remoteClient.Get(ctx, GetNamespacedNameType(upgrader.PodName(nodeName), constants.EksaSystemNamespace), pod); err != nil { return nil, err } return pod, nil diff --git a/pkg/api/v1alpha1/controlplaneupgrade_types.go b/pkg/api/v1alpha1/controlplaneupgrade_types.go index 5e3aea0253cf..beb8e4ef8cad 100644 --- a/pkg/api/v1alpha1/controlplaneupgrade_types.go +++ b/pkg/api/v1alpha1/controlplaneupgrade_types.go @@ -21,9 +21,9 @@ type ControlPlaneUpgradeSpec struct { // ControlPlaneUpgradeStatus defines the observed state of ControlPlaneUpgrade. type ControlPlaneUpgradeStatus struct { - RequireUpgrade int64 `json:"requireUpgrade"` - Upgraded int64 `json:"upgraded"` - Ready bool `json:"ready"` + RequireUpgrade int64 `json:"requireUpgrade,omitempty"` + Upgraded int64 `json:"upgraded,omitempty"` + Ready bool `json:"ready,omitempty"` } //+kubebuilder:object:root=true