Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update kube-vip for InPlace upgrades. #7595

Merged
merged 2 commits into from
Feb 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
82 changes: 82 additions & 0 deletions controllers/controlplaneupgrade_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"time"

"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand All @@ -49,6 +50,7 @@ const (
controlPlaneUpgradeFinalizerName = "controlplaneupgrades.anywhere.eks.amazonaws.com/finalizer"
kubeadmClusterConfigurationAnnotation = "controlplane.cluster.x-k8s.io/kubeadm-cluster-configuration"
cloneFromNameAnnotationInfraMachine = "cluster.x-k8s.io/cloned-from-name"
kubeVipStaticPodPath = "/etc/kubernetes/manifests/kube-vip.yaml"
)

// ControlPlaneUpgradeReconciler reconciles a ControlPlaneUpgradeReconciler object.
Expand Down Expand Up @@ -141,9 +143,16 @@ func (r *ControlPlaneUpgradeReconciler) reconcile(ctx context.Context, log logr.
// return early if controlplane upgrade is already complete
if cpUpgrade.Status.Ready {
log.Info("All Control Plane nodes are upgraded")
// check if kube-vip config map exists and clean it up
if err := cleanupKubeVipCM(ctx, log, r.client); err != nil {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}

if err := createKubeVipCMIfNotExist(ctx, r.client, cpUpgrade); err != nil {
return ctrl.Result{}, err
}
log.Info("Upgrading all Control Plane nodes")

for idx, machineRef := range cpUpgrade.Spec.MachinesRequireUpgrade {
Expand Down Expand Up @@ -363,3 +372,76 @@ func getCapiMachine(ctx context.Context, client client.Client, nodeUpgrade *anyw
}
return machine, nil
}

// cleanupKubeVipCM deletes the kube-vip config map created for in-place
// upgrades once all control plane nodes have been upgraded. A missing config
// map is not an error: there is simply nothing left to clean up.
func cleanupKubeVipCM(ctx context.Context, log logr.Logger, client client.Client) error {
	cm := &corev1.ConfigMap{}
	if err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), cm); err != nil {
		if apierrors.IsNotFound(err) {
			// Already gone; a previous reconcile may have deleted it.
			// Note: logr messages are not printf-style, so the config map
			// name goes in the key/value pairs, not in the message.
			log.Info("Config map not found, skipping deletion", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
			return nil
		}
		return fmt.Errorf("getting %s config map: %w", constants.KubeVipConfigMapName, err)
	}
	log.Info("Deleting kube-vip config map", "ConfigMap", constants.KubeVipConfigMapName, "Namespace", constants.EksaSystemNamespace)
	if err := client.Delete(ctx, cm); err != nil {
		return fmt.Errorf("deleting %s config map: %w", constants.KubeVipConfigMapName, err)
	}
	return nil
}

// createKubeVipCMIfNotExist ensures the kube-vip config map consumed by the
// in-place upgrader pods exists, building it from the ControlPlaneUpgrade's
// KubeadmControlPlane spec data when it is absent. It is a no-op when the
// config map is already present.
func createKubeVipCMIfNotExist(ctx context.Context, client client.Client, cpUpgrade *anywherev1.ControlPlaneUpgrade) error {
	kubeVipCM := &corev1.ConfigMap{}
	err := client.Get(ctx, GetNamespacedNameType(constants.KubeVipConfigMapName, constants.EksaSystemNamespace), kubeVipCM)
	if err == nil {
		// Config map already exists; nothing to do.
		return nil
	}
	if !apierrors.IsNotFound(err) {
		return fmt.Errorf("getting %s config map: %w", constants.KubeVipConfigMapName, err)
	}
	kubeVipCM, err = kubeVipConfigMap(cpUpgrade)
	if err != nil {
		return err
	}
	if err := client.Create(ctx, kubeVipCM); err != nil {
		return fmt.Errorf("creating %s config map: %w", constants.KubeVipConfigMapName, err)
	}
	return nil
}

func kubeVipConfigMap(cpUpgrade *anywherev1.ControlPlaneUpgrade) (*corev1.ConfigMap, error) {
kcpSpec, err := decodeAndUnmarshalKcpSpecData(cpUpgrade.Spec.ControlPlaneSpecData)
if err != nil {
return nil, err
}
var kubeVipConfig string
for _, file := range kcpSpec.KubeadmConfigSpec.Files {
if file.Path == kubeVipStaticPodPath {
kubeVipConfig = file.Content
break
}
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can you add a check here to see kubeVipConfig is not empty after the for loop? incase the kube-vip is not found in files, just error out


if kubeVipConfig == "" {
return nil, errors.New("fetching kube-vip manifest from KubeadmConfigSpec")
}

blockOwnerDeletionFlag := true
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: constants.KubeVipConfigMapName,
Namespace: constants.EksaSystemNamespace,
OwnerReferences: []metav1.OwnerReference{{
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is good but this doesn't ensure it gets cleaned up right away right? Meaning, if a new CPUpgrade needs to get created with the same name due to some subsequent upgrade, we will never get the new CM for this if it never gets deleted?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That's a good point. I will clean it up once all the nodes are upgraded for a given cpu. This way we ensure we always clean it after a successful upgrade.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think when you have BlockOwnerDeletion set on the child object, it blocks the parent object deletion. @rahulbabu95 you can test it out just to be sure

APIVersion: cpUpgrade.APIVersion,
Kind: cpUpgrade.Kind,
Name: cpUpgrade.Name,
UID: cpUpgrade.UID,
BlockOwnerDeletion: &blockOwnerDeletionFlag,
}},
},
Data: map[string]string{constants.KubeVipManifestName: kubeVipConfig},
}, nil
}
55 changes: 54 additions & 1 deletion controllers/controlplaneupgrade_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,7 @@ func TestCPUpgradeReconcileUpdateKubeadmConfigSuccess(t *testing.T) {
ctx := context.Background()

testObjs := getObjectsForCPUpgradeTest()
kubeVipCm := generateKubeVipConfigMap()
for i := range testObjs.nodeUpgrades {
testObjs.nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", testObjs.machines[i].Name)
testObjs.nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{
Expand All @@ -331,7 +332,7 @@ func TestCPUpgradeReconcileUpdateKubeadmConfigSuccess(t *testing.T) {
}
objs := []runtime.Object{
testObjs.cluster, testObjs.cpUpgrade, testObjs.machines[0], testObjs.machines[1], testObjs.nodes[0], testObjs.nodes[1],
testObjs.nodeUpgrades[0], testObjs.nodeUpgrades[1], testObjs.kubeadmConfigs[0], testObjs.kubeadmConfigs[1], testObjs.infraMachines[0], testObjs.infraMachines[1],
testObjs.nodeUpgrades[0], testObjs.nodeUpgrades[1], testObjs.kubeadmConfigs[0], testObjs.kubeadmConfigs[1], testObjs.infraMachines[0], testObjs.infraMachines[1], kubeVipCm,
}
testObjs.nodeUpgrades[0].Status.Completed = true
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
Expand Down Expand Up @@ -615,6 +616,12 @@ func generateKubeadmConfig() *bootstrapv1.KubeadmConfig {
},
},
InitConfiguration: &bootstrapv1.InitConfiguration{},
Files: []bootstrapv1.File{
{
Path: "/etc/kubernetes/manifests/kube-vip.yaml",
Content: kubeVipSpec(),
},
},
},
}
}
Expand All @@ -636,3 +643,49 @@ func generateAndSetInfraMachine(machine *clusterv1.Machine) *tinkerbellv1.Tinker
},
}
}

// generateKubeVipConfigMap returns the kube-vip config map test fixture,
// keyed by the manifest file name and populated with the kube-vip pod spec.
func generateKubeVipConfigMap() *corev1.ConfigMap {
	data := map[string]string{
		constants.KubeVipManifestName: kubeVipSpec(),
	}
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      constants.KubeVipConfigMapName,
			Namespace: constants.EksaSystemNamespace,
		},
		Data: data,
	}
}

// kubeVipSpec returns the kube-vip static pod manifest used as fixture
// content for both the KubeadmConfig Files entry and the kube-vip config map
// in tests, so the two fixtures stay byte-identical.
// NOTE(review): the leading " |" line looks like a YAML block-scalar marker
// carried over from a KubeadmControlPlane template — confirm it is intended
// to be part of the fixture content.
// NOTE(review): the YAML nesting indentation appears to have been lost in
// this view of the file — verify the raw string against the committed file
// before relying on its exact bytes.
func kubeVipSpec() string {
	return ` |
apiVersion: v1
kind: Pod
metadata:
name: kube-vip
namespace: kube-system
spec:
containers:
- args:
- manager
env:
- name: vip_arp
value: "true"
- name: port
value: "6443"
image: public.ecr.aws/l0g8r8j6/kube-vip/kube-vip:v0.6.4-eks-a-v0.19.0-dev-build.128
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
hostNetwork: true
volumes:
- hostPath:
path: /etc/kubernetes/admin.conf
name: kubeconfig`
}
4 changes: 4 additions & 0 deletions pkg/constants/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,10 @@ const (
EksaPackagesName = "eksa-packages"
// UpgraderConfigMapName is the name of config map that stores the upgrader images.
UpgraderConfigMapName = "in-place-upgrade"
// KubeVipConfigMapName is the name of config map that stores the kube-vip config.
KubeVipConfigMapName = "kube-vip-in-place-upgrade"
// KubeVipManifestName is the name of kube-vip spec file.
KubeVipManifestName = "kube-vip.yaml"

CloudstackAnnotationSuffix = "cloudstack.anywhere.eks.amazonaws.com/v1alpha1"

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,9 @@ spec:
volumeMounts:
- mountPath: /usr/host
name: host-components
- mountPath: /eksa-upgrades/kube-vip.yaml
name: kube-vip
subPath: kube-vip.yaml
- args:
- --target
- "1"
Expand Down Expand Up @@ -109,4 +112,10 @@ spec:
path: /foo
type: DirectoryOrCreate
name: host-components
- configMap:
items:
- key: kube-vip.yaml
path: kube-vip.yaml
name: kube-vip-in-place-upgrade
name: kube-vip
status: {}
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,9 @@ spec:
volumeMounts:
- mountPath: /usr/host
name: host-components
- mountPath: /eksa-upgrades/kube-vip.yaml
name: kube-vip
subPath: kube-vip.yaml
- args:
- --target
- "1"
Expand Down Expand Up @@ -107,4 +110,10 @@ spec:
path: /foo
type: DirectoryOrCreate
name: host-components
- configMap:
items:
- key: kube-vip.yaml
path: kube-vip.yaml
name: kube-vip-in-place-upgrade
name: kube-vip
status: {}
98 changes: 66 additions & 32 deletions pkg/nodeupgrader/upgrader.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,27 +39,30 @@ func PodName(nodeName string) string {

// UpgradeFirstControlPlanePod returns an upgrader pod that should be deployed on the first control plane node.
func UpgradeFirstControlPlanePod(nodeName, image, kubernetesVersion, etcdVersion string) *corev1.Pod {
	// The first control plane node runs the full kubeadm upgrade and, as a
	// control plane node, also needs the kube-vip manifest mounted.
	p := upgraderPod(nodeName, image, true)
	p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_first_cp", kubernetesVersion, etcdVersion)
	return p
}

// UpgradeSecondaryControlPlanePod returns an upgrader pod that can be deployed on the remaining control plane nodes.
func UpgradeSecondaryControlPlanePod(nodeName, image string) *corev1.Pod {
	// Secondary control plane nodes also need the kube-vip manifest mounted.
	p := upgraderPod(nodeName, image, true)
	p.Spec.InitContainers = containersForUpgrade(true, image, nodeName, "kubeadm_in_rest_cp")
	return p
}

// UpgradeWorkerPod returns an upgrader pod that can be deployed on worker nodes.
func UpgradeWorkerPod(nodeName, image string) *corev1.Pod {
	// Worker nodes do not run kube-vip, so no kube-vip volume is mounted.
	p := upgraderPod(nodeName, image, false)
	p.Spec.InitContainers = containersForUpgrade(false, image, nodeName, "kubeadm_in_worker")
	return p
}

func upgraderPod(nodeName, image string) *corev1.Pod {
dirOrCreate := corev1.HostPathDirectoryOrCreate
func upgraderPod(nodeName, image string, isCP bool) *corev1.Pod {
volumes := []corev1.Volume{hostComponentsVolume()}
if isCP {
volumes = append(volumes, kubeVipVolume())
}
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: PodName(nodeName),
Expand All @@ -71,17 +74,7 @@ func upgraderPod(nodeName, image string) *corev1.Pod {
Spec: corev1.PodSpec{
NodeName: nodeName,
HostPID: true,
Volumes: []corev1.Volume{
{
Name: "host-components",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/foo",
Type: &dirOrCreate,
},
},
},
},
Volumes: volumes,
Containers: []corev1.Container{
nsenterContainer(image, PostUpgradeContainerName, upgradeScript, "print_status_and_cleanup"),
},
Expand All @@ -90,29 +83,38 @@ func upgraderPod(nodeName, image string) *corev1.Pod {
}
}

func containersForUpgrade(image, nodeName string, kubeadmUpgradeCommand ...string) []corev1.Container {
func containersForUpgrade(isCP bool, image, nodeName string, kubeadmUpgradeCommand ...string) []corev1.Container {
return []corev1.Container{
copierContainer(image),
copierContainer(image, isCP),
nsenterContainer(image, ContainerdUpgraderContainerName, upgradeScript, "upgrade_containerd"),
nsenterContainer(image, CNIPluginsUpgraderContainerName, upgradeScript, "cni_plugins"),
nsenterContainer(image, KubeadmUpgraderContainerName, append([]string{upgradeScript}, kubeadmUpgradeCommand...)...),
nsenterContainer(image, KubeletUpgradeContainerName, upgradeScript, "kubelet_and_kubectl"),
}
}

func copierContainer(image string) corev1.Container {
return corev1.Container{
Name: CopierContainerName,
Image: image,
Command: []string{"cp"},
Args: []string{"-r", "/eksa-upgrades", "/usr/host"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "host-components",
MountPath: "/usr/host",
},
func copierContainer(image string, isCP bool) corev1.Container {
volumeMount := []corev1.VolumeMount{
{
Name: "host-components",
MountPath: "/usr/host",
},
}
if isCP {
kubeVipVolMount := corev1.VolumeMount{
Name: "kube-vip",
MountPath: fmt.Sprintf("/eksa-upgrades/%s", constants.KubeVipManifestName),
SubPath: constants.KubeVipManifestName,
}
volumeMount = append(volumeMount, kubeVipVolMount)
}
return corev1.Container{
Name: CopierContainerName,
Image: image,
Command: []string{"cp"},
Args: []string{"-r", "/eksa-upgrades", "/usr/host"},
VolumeMounts: volumeMount,
}
}

func nsenterContainer(image, name string, extraArgs ...string) corev1.Container {
Expand All @@ -135,3 +137,35 @@ func nsenterContainer(image, name string, extraArgs ...string) corev1.Container
},
}
}

// hostComponentsVolume returns the host-path volume the upgrader pod uses to
// stage upgrade components on the node; the directory is created if missing.
func hostComponentsVolume() corev1.Volume {
	hostPathType := corev1.HostPathDirectoryOrCreate
	source := corev1.VolumeSource{
		HostPath: &corev1.HostPathVolumeSource{
			Path: "/foo",
			Type: &hostPathType,
		},
	}
	return corev1.Volume{Name: "host-components", VolumeSource: source}
}

// kubeVipVolume returns the volume that projects the kube-vip manifest out of
// its config map into the upgrader pod.
func kubeVipVolume() corev1.Volume {
	manifestItem := corev1.KeyToPath{
		Key:  constants.KubeVipManifestName,
		Path: constants.KubeVipManifestName,
	}
	cmSource := &corev1.ConfigMapVolumeSource{
		LocalObjectReference: corev1.LocalObjectReference{
			Name: constants.KubeVipConfigMapName,
		},
		Items: []corev1.KeyToPath{manifestItem},
	}
	return corev1.Volume{
		Name:         "kube-vip",
		VolumeSource: corev1.VolumeSource{ConfigMap: cmSource},
	}
}
Loading