Update kube-vip for InPlace upgrades. #7595
Changes from all commits
@@ -24,6 +24,7 @@ import (
    "time"

    "github.com/go-logr/logr"
    "github.com/pkg/errors"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -49,6 +50,7 @@ const (
    controlPlaneUpgradeFinalizerName      = "controlplaneupgrades.anywhere.eks.amazonaws.com/finalizer"
    kubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration"
    cloneFromNameAnnotationInfraMachine   = "cluster.x-k8s.io/cloned-from-name"
    kubeVipStaticPodPath                  = "/etc/kubernetes/manifests/kube-vip.yaml"
)

// ControlPlaneUpgradeReconciler reconciles a ControlPlaneUpgradeReconciler object.
@@ -141,9 +143,16 @@ func (r *ControlPlaneUpgradeReconciler) reconcile(ctx context.Context, log logr.
    // return early if controlplane upgrade is already complete
    if cpUpgrade.Status.Ready {
        log.Info("All Control Plane nodes are upgraded")
        // check if kube-vip config map exists and clean it up
        if err := cleanupKubeVipCM(ctx, log, r.client); err != nil {
            return ctrl.Result{}, err
        }
        return ctrl.Result{}, nil
    }

    if err := createKubeVipCMIfNotExist(ctx, r.client, cpUpgrade); err != nil {
        return ctrl.Result{}, err
    }
    log.Info("Upgrading all Control Plane nodes")

    for idx, machineRef := range cpUpgrade.Spec.MachinesRequireUpgrade {
@@ -363,3 +372,76 @@ func getCapiMachine(ctx context.Context, client client.Client, nodeUpgrade *anyw
    }
    return machine, nil
}

func cleanupKubeVipCM(ctx context.Context, log logr.Logger, client client.Client) error {
    cm := &corev1.ConfigMap{}
    if err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), cm); err != nil {
        if apierrors.IsNotFound(err) {
            log.Info("config map %s not found, skipping deletion", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
        } else {
            return fmt.Errorf("getting %s config map: %v", constants.KubeVipConfigMapName, err)
        }
    } else {
        log.Info("Deleting kube-vip config map", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
        if err := client.Delete(ctx, cm); err != nil {
            return fmt.Errorf("deleting %s config map: %v", constants.KubeVipConfigMapName, err)
        }
    }
    return nil
}

func createKubeVipCMIfNotExist(ctx context.Context, client client.Client, cpUpgrade *anywherev1.ControlPlaneUpgrade) error {
    kubeVipCM := &corev1.ConfigMap{}
    if err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), kubeVipCM); err != nil {
        if apierrors.IsNotFound(err) {
            kubeVipCM, err = kubeVipConfigMap(cpUpgrade)
            if err != nil {
                return err
            }
            if err := client.Create(ctx, kubeVipCM); err != nil {
                return fmt.Errorf("failed to create %s config map: %v", constants.KubeVipConfigMapName, err)
            }
        } else {
            return fmt.Errorf("getting %s configmap: %v", constants.KubeVipConfigMapName, err)
        }
    }
    return nil
}

func kubeVipConfigMap(cpUpgrade *anywherev1.ControlPlaneUpgrade) (*corev1.ConfigMap, error) {
    kcpSpec, err := decodeAndUnmarshalKcpSpecData(cpUpgrade.Spec.ControlPlaneSpecData)
    if err != nil {
        return nil, err
    }
    var kubeVipConfig string
    for _, file := range kcpSpec.KubeadmConfigSpec.Files {
        if file.Path == kubeVipStaticPodPath {
            kubeVipConfig = file.Content
            break
        }
    }

    if kubeVipConfig == "" {
        return nil, errors.New("fetching kube-vip manifest from KubeadmConfigSpec")
    }

    blockOwnerDeletionFlag := true
    return &corev1.ConfigMap{
        TypeMeta: metav1.TypeMeta{
            Kind:       "ConfigMap",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      constants.KubeVipConfigMapName,
            Namespace: constants.EksaSystemNamespace,
            OwnerReferences: []metav1.OwnerReference{{
                APIVersion:         cpUpgrade.APIVersion,
                Kind:               cpUpgrade.Kind,
                Name:               cpUpgrade.Name,
                UID:                cpUpgrade.UID,
                BlockOwnerDeletion: &blockOwnerDeletionFlag,
            }},
        },
        Data: map[string]string{constants.KubeVipManifestName: kubeVipConfig},
    }, nil
}

Review discussion on the owner reference added above:

Reviewer: This is good, but this doesn't ensure it gets cleaned up right away, right? Meaning, if a new CPUpgrade needs to be created with the same name due to some subsequent upgrade, we will never get a new CM for it if the old one never gets deleted?

Author: That's a good point. I will clean it up once all the nodes are upgraded for a given CPUpgrade. This way we ensure we always clean it up after a successful upgrade.

Reviewer: I think when you have
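Per that discussion, the config map is deleted once the ControlPlaneUpgrade reports Ready, so a later upgrade reusing the same name gets a freshly rendered copy. Below is a minimal sketch of how that cleanup path could be unit-tested with controller-runtime's fake client; it assumes the test lives in the same package as cleanupKubeVipCM and that the repo's constants package is importable as shown, so treat it as illustrative rather than the PR's actual tests.

```go
package controllers

import (
    "context"
    "testing"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    "github.com/aws/eks-anywhere/pkg/constants"
)

// Illustrative only: exercises cleanupKubeVipCM against controller-runtime's fake client.
func TestCleanupKubeVipCM(t *testing.T) {
    ctx := context.Background()
    log := logr.Discard()

    existing := &corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name:      constants.KubeVipConfigMapName,
            Namespace: constants.EksaSystemNamespace,
        },
    }

    // When the config map exists, cleanup should delete it.
    cl := fake.NewClientBuilder().WithObjects(existing).Build()
    if err := cleanupKubeVipCM(ctx, log, cl); err != nil {
        t.Fatalf("expected cleanup to succeed, got %v", err)
    }
    key := types.NamespacedName{Name: constants.KubeVipConfigMapName, Namespace: constants.EksaSystemNamespace}
    if err := cl.Get(ctx, key, &corev1.ConfigMap{}); !apierrors.IsNotFound(err) {
        t.Fatalf("expected config map to be deleted, got %v", err)
    }

    // When the config map is absent, cleanup should be a no-op rather than an error.
    cl = fake.NewClientBuilder().Build()
    if err := cleanupKubeVipCM(ctx, log, cl); err != nil {
        t.Fatalf("expected missing config map to be skipped, got %v", err)
    }
}
```

The second case mirrors the IsNotFound branch in cleanupKubeVipCM, which logs and skips deletion instead of failing the reconcile.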
Review comment on kubeVipConfigMap:

Reviewer: Can you add a check here to see that kubeVipConfig is not empty after the for loop? In case kube-vip is not found in the files, just error out.
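The diff above already includes the requested guard: kubeVipConfigMap returns an error when kubeVipConfig is empty. For reference, the same check could also be factored into a small helper along these lines; this is a hypothetical sketch, not code from the PR, and it assumes the cluster-api bootstrapv1 File type and the kubeVipStaticPodPath constant added in this change.

```go
package controllers

import (
    "fmt"

    bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)

// kubeVipManifestFromFiles is a hypothetical helper: it looks up the kube-vip
// static pod manifest in the KubeadmConfigSpec files and errors out when the
// manifest is missing or empty, instead of producing an empty config map.
func kubeVipManifestFromFiles(files []bootstrapv1.File) (string, error) {
    for _, file := range files {
        if file.Path == kubeVipStaticPodPath && file.Content != "" {
            return file.Content, nil
        }
    }
    return "", fmt.Errorf("no kube-vip manifest found at %s in KubeadmConfigSpec files", kubeVipStaticPodPath)
}
```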