From c6c5b9cbd13d3ef9756b3ea945b03ed776bc0792 Mon Sep 17 00:00:00 2001
From: Rahul Ganesh
Date: Tue, 13 Feb 2024 16:02:53 -0800
Subject: [PATCH 1/2] Update kube-vip manifest for InPlace upgrades

Signed-off-by: Rahul Ganesh
---
 controllers/controlplaneupgrade_controller.go | 76 ++++++++++++++
 .../controlplaneupgrade_controller_test.go    | 55 ++++++++++-
 pkg/constants/constants.go                    |  3 +
 ...cted_first_control_plane_upgrader_pod.yaml |  9 ++
 ...ected_rest_control_plane_upgrader_pod.yaml |  9 ++
 pkg/nodeupgrader/upgrader.go                  | 98 +++++++++++++------
 6 files changed, 217 insertions(+), 33 deletions(-)

diff --git a/controllers/controlplaneupgrade_controller.go b/controllers/controlplaneupgrade_controller.go
index bca9699ea2c1..0014812b3c67 100644
--- a/controllers/controlplaneupgrade_controller.go
+++ b/controllers/controlplaneupgrade_controller.go
@@ -49,6 +49,7 @@ const (
 	controlPlaneUpgradeFinalizerName = "controlplaneupgrades.anywhere.eks.amazonaws.com/finalizer"
 	kubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration"
 	cloneFromNameAnnotationInfraMachine = "cluster.x-k8s.io/cloned-from-name"
+	kubeVipStaticPodPath = "/etc/kubernetes/manifests/kube-vip.yaml"
 )
 
 // ControlPlaneUpgradeReconciler reconciles a ControlPlaneUpgradeReconciler object.
@@ -141,9 +142,16 @@ func (r *ControlPlaneUpgradeReconciler) reconcile(ctx context.Context, log logr.
 	// return early if controlplane upgrade is already complete
 	if cpUpgrade.Status.Ready {
 		log.Info("All Control Plane nodes are upgraded")
+		// check if kube-vip config map exists and clean it up
+		if err := cleanupKubeVipCM(ctx, log, r.client); err != nil {
+			return ctrl.Result{}, err
+		}
 		return ctrl.Result{}, nil
 	}
 
+	if err := createKubeVipCMIfNotExist(ctx, r.client, cpUpgrade); err != nil {
+		return ctrl.Result{}, err
+	}
 	log.Info("Upgrading all Control Plane nodes")
 
 	for idx, machineRef := range cpUpgrade.Spec.MachinesRequireUpgrade {
@@ -363,3 +371,71 @@ func getCapiMachine(ctx context.Context, client client.Client, nodeUpgrade *anyw
 	}
 	return machine, nil
 }
+
+func cleanupKubeVipCM(ctx context.Context, log logr.Logger, client client.Client) error {
+	cm := &corev1.ConfigMap{}
+	if err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), cm); err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("config map %s not found, skipping deletion", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
+		} else {
+			return fmt.Errorf("getting %s config map: %v", constants.KubeVipConfigMapName, err)
+		}
+	} else {
+		log.Info("Deleting kube-vip config map", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
+		if err := client.Delete(ctx, cm); err != nil {
+			return fmt.Errorf("deleting %s config map: %v", constants.KubeVipConfigMapName, err)
+		}
+	}
+	return nil
+}
+
+func createKubeVipCMIfNotExist(ctx context.Context, client client.Client, cpUpgrade *anywherev1.ControlPlaneUpgrade) error {
+	kubeVipCM := &corev1.ConfigMap{}
+	if err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), kubeVipCM); err != nil {
+		if apierrors.IsNotFound(err) {
+			kubeVipCM, err = kubeVipConfigMap(cpUpgrade)
+			if err != nil {
+				return err
+			}
+			if err := client.Create(ctx, kubeVipCM); err != nil {
+				return fmt.Errorf("failed to create %s config map: %v", constants.KubeVipConfigMapName, err)
+			}
+		} else {
+			return fmt.Errorf("getting %s configmap: %v", constants.KubeVipConfigMapName, err)
+		}
+	}
+	return nil
+}
+
+func kubeVipConfigMap(cpUpgrade *anywherev1.ControlPlaneUpgrade) (*corev1.ConfigMap, error) {
+	kcpSpec, err := decodeAndUnmarshalKcpSpecData(cpUpgrade.Spec.ControlPlaneSpecData)
+	if err != nil {
+		return nil, err
+	}
+	var kubeVipConfig string
+	for _, file := range kcpSpec.KubeadmConfigSpec.Files {
+		if file.Path == kubeVipStaticPodPath {
+			kubeVipConfig = file.Content
+			break
+		}
+	}
+	blockOwnerDeletionFlag := true
+	return &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "ConfigMap",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: constants.KubeVipConfigMapName,
+			Namespace: constants.EksaSystemNamespace,
+			OwnerReferences: []metav1.OwnerReference{{
+				APIVersion: cpUpgrade.APIVersion,
+				Kind: cpUpgrade.Kind,
+				Name: cpUpgrade.Name,
+				UID: cpUpgrade.UID,
+				BlockOwnerDeletion: &blockOwnerDeletionFlag,
+			}},
+		},
+		Data: map[string]string{constants.KubeVipManifestName: kubeVipConfig},
+	}, nil
+}
diff --git a/controllers/controlplaneupgrade_controller_test.go b/controllers/controlplaneupgrade_controller_test.go
index a064d32d6028..42608fbc3cbf 100644
--- a/controllers/controlplaneupgrade_controller_test.go
+++ b/controllers/controlplaneupgrade_controller_test.go
@@ -323,6 +323,7 @@ func TestCPUpgradeReconcileUpdateKubeadmConfigSuccess(t *testing.T) {
 	ctx := context.Background()
 
 	testObjs := getObjectsForCPUpgradeTest()
+	kubeVipCm := generateKubeVipConfigMap()
 	for i := range testObjs.nodeUpgrades {
 		testObjs.nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", testObjs.machines[i].Name)
 		testObjs.nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{
@@ -331,7 +332,7 @@
 	}
 	objs := []runtime.Object{
 		testObjs.cluster, testObjs.cpUpgrade, testObjs.machines[0], testObjs.machines[1], testObjs.nodes[0], testObjs.nodes[1],
-		testObjs.nodeUpgrades[0], testObjs.nodeUpgrades[1], testObjs.kubeadmConfigs[0], testObjs.kubeadmConfigs[1], testObjs.infraMachines[0], testObjs.infraMachines[1],
+		testObjs.nodeUpgrades[0], testObjs.nodeUpgrades[1], testObjs.kubeadmConfigs[0], testObjs.kubeadmConfigs[1], testObjs.infraMachines[0], testObjs.infraMachines[1], kubeVipCm,
 	}
 	testObjs.nodeUpgrades[0].Status.Completed = true
 	client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
@@ -615,6 +616,12 @@ func generateKubeadmConfig() *bootstrapv1.KubeadmConfig {
 				},
 			},
 			InitConfiguration: &bootstrapv1.InitConfiguration{},
+			Files: []bootstrapv1.File{
+				{
+					Path: "/etc/kubernetes/manifests/kube-vip.yaml",
+					Content: kubeVipSpec(),
+				},
+			},
 		},
 	}
 }
@@ -636,3 +643,49 @@ func generateAndSetInfraMachine(machine *clusterv1.Machine) *tinkerbellv1.Tinker
 		},
 	}
 }
+
+func generateKubeVipConfigMap() *corev1.ConfigMap {
+	return &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: constants.KubeVipConfigMapName,
+			Namespace: constants.EksaSystemNamespace,
+		},
+
+		Data: map[string]string{constants.KubeVipManifestName: kubeVipSpec()},
+	}
+}
+
+func kubeVipSpec() string {
+	return ` |
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: kube-vip
+      namespace: kube-system
+    spec:
+      containers:
+      - args:
+        - manager
+        env:
+        - name: vip_arp
+          value: "true"
+        - name: port
+          value: "6443"
+        image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.6.4-eks-a-v0.19.0-dev-build.128
+        imagePullPolicy: IfNotPresent
+        name: kube-vip
+        resources: {}
+        securityContext:
+          capabilities:
+            add:
+            - NET_ADMIN
+            - NET_RAW
+        volumeMounts:
+        - mountPath: /etc/kubernetes/admin.conf
+          name: kubeconfig
+      hostNetwork: true
+      volumes:
+      - hostPath:
+          path: /etc/kubernetes/admin.conf
+        name: kubeconfig`
+}
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index 85526e1d5e68..2de0c111ca29 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -48,6 +48,9 @@ const (
 	EksaPackagesName = "eksa-packages"
 	// UpgraderConfigMapName is the name of config map that stores the upgrader images.
 	UpgraderConfigMapName = "in-place-upgrade"
+	// KubeVipConfigMapName is the name of config map that stores the kube-vip config.
+	KubeVipConfigMapName = "kube-vip-in-place-upgrade"
+	KubeVipManifestName = "kube-vip.yaml"
 
 	CloudstackAnnotationSuffix = "cloudstack.anywhere.eks.amazonaws.com/v1alpha1"
 
diff --git a/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml
index e13baeea507e..41d3de19d0cb 100644
--- a/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml
+++ b/pkg/nodeupgrader/testdata/expected_first_control_plane_upgrader_pod.yaml
@@ -36,6 +36,9 @@ spec:
     volumeMounts:
     - mountPath: /usr/host
       name: host-components
+    - mountPath: /eksa-upgrades/kube-vip.yaml
+      name: kube-vip
+      subPath: kube-vip.yaml
   - args:
     - --target
     - "1"
@@ -109,4 +112,10 @@ spec:
       path: /foo
       type: DirectoryOrCreate
     name: host-components
+  - configMap:
+      items:
+      - key: kube-vip.yaml
+        path: kube-vip.yaml
+      name: kube-vip-in-place-upgrade
+    name: kube-vip
 status: {}
diff --git a/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml b/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml
index a5ad11f469ac..764c75e9bebd 100755
--- a/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml
+++ b/pkg/nodeupgrader/testdata/expected_rest_control_plane_upgrader_pod.yaml
@@ -36,6 +36,9 @@ spec:
     volumeMounts:
     - mountPath: /usr/host
      name: host-components
+    - mountPath: /eksa-upgrades/kube-vip.yaml
+      name: kube-vip
+      subPath: kube-vip.yaml
   - args:
     - --target
     - "1"
@@ -107,4 +110,10 @@ spec:
       path: /foo
       type: DirectoryOrCreate
     name: host-components
+  - configMap:
+      items:
+      - key: kube-vip.yaml
+        path: kube-vip.yaml
+      name: kube-vip-in-place-upgrade
+    name: kube-vip
 status: {}
diff --git a/pkg/nodeupgrader/upgrader.go b/pkg/nodeupgrader/upgrader.go
index 40bd11bb5d2d..f76633d5bb45 100644
--- a/pkg/nodeupgrader/upgrader.go
+++ b/pkg/nodeupgrader/upgrader.go
@@ -39,27 +39,30 @@ func PodName(nodeName string) string {
 
 // UpgradeFirstControlPlanePod returns an upgrader pod that should be deployed on the first control plane node.
 func UpgradeFirstControlPlanePod(nodeName, image, kubernetesVersion, etcdVersion string) *corev1.Pod {
-	p := upgraderPod(nodeName, image)
-	p.Spec.InitContainers = containersForUpgrade(image, nodeName, "kubeadm_in_first_cp", kubernetesVersion, etcdVersion)
+	p := upgraderPod(nodeName, image, true)
+	p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_first_cp", kubernetesVersion, etcdVersion)
 	return p
 }
 
 // UpgradeSecondaryControlPlanePod returns an upgrader pod that can be deployed on the remaining control plane nodes.
 func UpgradeSecondaryControlPlanePod(nodeName, image string) *corev1.Pod {
-	p := upgraderPod(nodeName, image)
-	p.Spec.InitContainers = containersForUpgrade(image, nodeName, "kubeadm_in_rest_cp")
+	p := upgraderPod(nodeName, image, true)
+	p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_rest_cp")
 	return p
 }
 
 // UpgradeWorkerPod returns an upgrader pod that can be deployed on worker nodes.
 func UpgradeWorkerPod(nodeName, image string) *corev1.Pod {
-	p := upgraderPod(nodeName, image)
-	p.Spec.InitContainers = containersForUpgrade(image, nodeName, "kubeadm_in_worker")
+	p := upgraderPod(nodeName, image, false)
+	p.Spec.InitContainers = containersForUpgrade(false, image, nodeName, "kubeadm_in_worker")
 	return p
 }
 
-func upgraderPod(nodeName, image string) *corev1.Pod {
-	dirOrCreate := corev1.HostPathDirectoryOrCreate
+func upgraderPod(nodeName, image string, isCP bool) *corev1.Pod {
+	volumes := []corev1.Volume{hostComponentsVolume()}
+	if isCP {
+		volumes = append(volumes, kubeVipVolume())
+	}
 	return &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: PodName(nodeName),
@@ -71,17 +74,7 @@ func upgraderPod(nodeName, image string) *corev1.Pod {
 		Spec: corev1.PodSpec{
 			NodeName: nodeName,
 			HostPID: true,
-			Volumes: []corev1.Volume{
-				{
-					Name: "host-components",
-					VolumeSource: corev1.VolumeSource{
-						HostPath: &corev1.HostPathVolumeSource{
-							Path: "/foo",
-							Type: &dirOrCreate,
-						},
-					},
-				},
-			},
+			Volumes: volumes,
 			Containers: []corev1.Container{
 				nsenterContainer(image, PostUpgradeContainerName, upgradeScript, "print_status_and_cleanup"),
 			},
@@ -90,9 +83,9 @@
 	}
 }
 
-func containersForUpgrade(image, nodeName string, kubeadmUpgradeCommand ...string) []corev1.Container {
+func containersForUpgrade(isCP bool, image, nodeName string, kubeadmUpgradeCommand ...string) []corev1.Container {
 	return []corev1.Container{
-		copierContainer(image),
+		copierContainer(image, isCP),
 		nsenterContainer(image, ContainerdUpgraderContainerName, upgradeScript, "upgrade_containerd"),
 		nsenterContainer(image, CNIPluginsUpgraderContainerName, upgradeScript, "cni_plugins"),
 		nsenterContainer(image, KubeadmUpgraderContainerName, append([]string{upgradeScript}, kubeadmUpgradeCommand...)...),
@@ -100,19 +93,28 @@
 	}
 }
 
-func copierContainer(image string) corev1.Container {
-	return corev1.Container{
-		Name: CopierContainerName,
-		Image: image,
-		Command: []string{"cp"},
-		Args: []string{"-r", "/eksa-upgrades", "/usr/host"},
-		VolumeMounts: []corev1.VolumeMount{
-			{
-				Name: "host-components",
-				MountPath: "/usr/host",
-			},
+func copierContainer(image string, isCP bool) corev1.Container {
+	volumeMount := []corev1.VolumeMount{
+		{
+			Name: "host-components",
+			MountPath: "/usr/host",
 		},
 	}
+	if isCP {
+		kubeVipVolMount := corev1.VolumeMount{
+			Name: "kube-vip",
+			MountPath: fmt.Sprintf("/eksa-upgrades/%s", constants.KubeVipManifestName),
+			SubPath: constants.KubeVipManifestName,
+		}
+		volumeMount = append(volumeMount, kubeVipVolMount)
+	}
+	return corev1.Container{
+		Name: CopierContainerName,
+		Image: image,
+		Command: []string{"cp"},
+		Args: []string{"-r", "/eksa-upgrades", "/usr/host"},
+		VolumeMounts: volumeMount,
+	}
 }
 
 func nsenterContainer(image, name string, extraArgs ...string) corev1.Container {
@@ -135,3 +137,35 @@
 		},
 	}
 }
+
+func hostComponentsVolume() corev1.Volume {
+	dirOrCreate := corev1.HostPathDirectoryOrCreate
+	return corev1.Volume{
+		Name: "host-components",
+		VolumeSource: corev1.VolumeSource{
+			HostPath: &corev1.HostPathVolumeSource{
+				Path: "/foo",
+				Type: &dirOrCreate,
+			},
+		},
+	}
+}
+
+func kubeVipVolume() corev1.Volume {
+	return corev1.Volume{
+		Name: "kube-vip",
+		VolumeSource: corev1.VolumeSource{
+			ConfigMap: &corev1.ConfigMapVolumeSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: constants.KubeVipConfigMapName,
+				},
+				Items: []corev1.KeyToPath{
+					{
+						Key: constants.KubeVipManifestName,
+						Path: constants.KubeVipManifestName,
+					},
+				},
+			},
+		},
+	}
+}

From 1996a5f73e605e5cf9bcab9a0ead2cb811160926 Mon Sep 17 00:00:00 2001
From: Rahul Ganesh
Date: Thu, 15 Feb 2024 11:52:42 -0800
Subject: [PATCH 2/2] error out if kube-vip spec not found and fix formatting

Signed-off-by: Rahul Ganesh
---
 controllers/controlplaneupgrade_controller.go | 8 +++++++-
 pkg/constants/constants.go                    | 3 ++-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/controllers/controlplaneupgrade_controller.go b/controllers/controlplaneupgrade_controller.go
index 0014812b3c67..1bfdecbd890e 100644
--- a/controllers/controlplaneupgrade_controller.go
+++ b/controllers/controlplaneupgrade_controller.go
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -376,7 +377,7 @@ func cleanupKubeVipCM(ctx context.Context, log logr.Logger, client client.Client
 	cm := &corev1.ConfigMap{}
 	if err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), cm); err != nil {
 		if apierrors.IsNotFound(err) {
-			log.Info("config map %s not found, skipping deletion", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
+			log.Info("config map %s not found, skipping deletion", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
 		} else {
 			return fmt.Errorf("getting %s config map: %v", constants.KubeVipConfigMapName, err)
 		}
@@ -419,6 +420,11 @@ func kubeVipConfigMap(cpUpgrade *anywherev1.ControlPlaneUpgrade) (*corev1.Config
 			break
 		}
 	}
+
+	if kubeVipConfig == "" {
+		return nil, errors.New("fetching kube-vip manifest from KubeadmConfigSpec")
+	}
+
 	blockOwnerDeletionFlag := true
 	return &corev1.ConfigMap{
 		TypeMeta: metav1.TypeMeta{
diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go
index 2de0c111ca29..6f3b6fc8699a 100644
--- a/pkg/constants/constants.go
+++ b/pkg/constants/constants.go
@@ -50,7 +50,8 @@ const (
 	UpgraderConfigMapName = "in-place-upgrade"
 	// KubeVipConfigMapName is the name of config map that stores the kube-vip config.
 	KubeVipConfigMapName = "kube-vip-in-place-upgrade"
-	KubeVipManifestName = "kube-vip.yaml"
+	// KubeVipManifestName is the name of kube-vip spec file.
+	KubeVipManifestName = "kube-vip.yaml"
 
 	CloudstackAnnotationSuffix = "cloudstack.anywhere.eks.amazonaws.com/v1alpha1"
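
Reviewer note, not part of the patches above: a quick way to sanity-check the control plane wiring introduced here is to assert that the upgrader pod for a control plane node carries a volume backed by the kube-vip-in-place-upgrade ConfigMap. The sketch below is illustrative only; the test name and the node/image/version arguments are made up, and it assumes the exported helpers behave exactly as in the diff above.

package nodeupgrader_test

import (
	"testing"

	"github.com/aws/eks-anywhere/pkg/constants"
	"github.com/aws/eks-anywhere/pkg/nodeupgrader"
)

// Hypothetical check: control plane upgrader pods should reference the
// kube-vip ConfigMap created by the ControlPlaneUpgrade controller.
func TestControlPlaneUpgraderPodCarriesKubeVipVolume(t *testing.T) {
	// Placeholder arguments; real callers pass the node name, upgrader image,
	// and target Kubernetes/etcd versions.
	pod := nodeupgrader.UpgradeFirstControlPlanePod("cp-node-1", "upgrader:latest", "v1.28.3", "v3.5.9")

	for _, v := range pod.Spec.Volumes {
		if v.ConfigMap != nil && v.ConfigMap.Name == constants.KubeVipConfigMapName {
			return // kube-vip volume is wired into the pod spec
		}
	}
	t.Fatalf("expected a volume backed by the %s config map", constants.KubeVipConfigMapName)
}

Worker upgrader pods take the other branch (isCP=false) and should not carry this volume, mirroring the conditional added in upgraderPod.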