
Add methods to fetch upgrader image from bundle #7283

Merged · 1 commit · Jan 18, 2024
4 changes: 2 additions & 2 deletions controllers/controlplaneupgrade_controller_test.go
@@ -205,7 +205,7 @@ func getObjectsForCPUpgradeTest() (*clusterv1.Cluster, []*clusterv1.Machine, []*
machines := []*clusterv1.Machine{machine1, machine2}
nodes := []*corev1.Node{node1, node2}
nodeUpgrades := []*anywherev1.NodeUpgrade{nodeUpgrade1, nodeUpgrade2}
- cpUpgrade := generateCPUpgrade(machines, cluster)
+ cpUpgrade := generateCPUpgrade(machines)
return cluster, machines, nodes, cpUpgrade, nodeUpgrades
}

@@ -218,7 +218,7 @@ func cpUpgradeRequest(cpUpgrade *anywherev1.ControlPlaneUpgrade) reconcile.Reque
}
}

- func generateCPUpgrade(machine []*clusterv1.Machine, cluster *clusterv1.Cluster) *anywherev1.ControlPlaneUpgrade {
+ func generateCPUpgrade(machine []*clusterv1.Machine) *anywherev1.ControlPlaneUpgrade {
etcdVersion := "v1.28.3-eks-1-28-9"
return &anywherev1.ControlPlaneUpgrade{
ObjectMeta: metav1.ObjectMeta{
4 changes: 2 additions & 2 deletions controllers/machinedeploymentupgrade_controller_test.go
@@ -146,7 +146,7 @@ func getObjectsForMDUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *core
node := generateNode()
machine := generateMachine(cluster, node)
nodeUpgrade := generateNodeUpgrade(machine)
- mdUpgrade := generateMDUpgrade(machine, cluster)
+ mdUpgrade := generateMDUpgrade(machine)
return cluster, machine, node, mdUpgrade, nodeUpgrade
}

@@ -159,7 +159,7 @@ func mdUpgradeRequest(mdUpgrade *anywherev1.MachineDeploymentUpgrade) reconcile.
}
}

- func generateMDUpgrade(machine *clusterv1.Machine, cluster *clusterv1.Cluster) *anywherev1.MachineDeploymentUpgrade {
+ func generateMDUpgrade(machine *clusterv1.Machine) *anywherev1.MachineDeploymentUpgrade {
return &anywherev1.MachineDeploymentUpgrade{
ObjectMeta: metav1.ObjectMeta{
Name: "md-upgrade-request",
25 changes: 18 additions & 7 deletions controllers/nodeupgrade_controller.go
@@ -6,6 +6,7 @@
"time"

"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,10 +26,8 @@
)

const (
- // TODO(in-place): Get this image from the bundle instead of using the hardcoded one.
- defaultUpgraderImage = "public.ecr.aws/t2p4l7v3/upgrader:eksdbase"
- controlPlaneLabel    = "node-role.kubernetes.io/control-plane"
- podDNEMessage        = "Upgrader pod does not exist"
+ controlPlaneLabel = "node-role.kubernetes.io/control-plane"
+ podDNEMessage     = "Upgrader pod does not exist"

// nodeUpgradeFinalizerName is the finalizer added to NodeUpgrade objects to handle deletion.
nodeUpgradeFinalizerName = "nodeupgrades.anywhere.eks.amazonaws.com/finalizer"
@@ -168,14 +167,26 @@
return ctrl.Result{}, nil
}

+ configMap := &corev1.ConfigMap{}
+ if err := remoteClient.Get(ctx, types.NamespacedName{Name: constants.UpgraderConfigMapName, Namespace: constants.EksaSystemNamespace}, configMap); err != nil {
+     return ctrl.Result{}, err
+ }

@vivek-koppuru (Member) commented on Jan 17, 2024:

Another option is to pass the config map from here into the node upgrade object, so that if the node upgrader is run on its own for any reason, we can pass in any config map. If we decide to mount the config map to the upgrader pod directly, that gives us a different kind of flexibility, where we can deploy the pod directly with a different config map mounted. Just some thoughts to consider.
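A minimal sketch of the mount-the-ConfigMap alternative described in the comment above; the package, helper name, volume name, and mount path are assumptions for illustration, not part of this PR:

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// upgraderPodWithConfigMap is a hypothetical helper: instead of the controller
// resolving the image from the ConfigMap, the ConfigMap is mounted into the
// upgrader pod so the pod itself can read whatever mapping it is given.
func upgraderPodWithConfigMap(nodeName, image, configMapName string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      nodeName + "-node-upgrader",
			Namespace: "eksa-system",
		},
		Spec: corev1.PodSpec{
			NodeName: nodeName,
			Volumes: []corev1.Volume{{
				Name: "upgrader-config", // illustrative volume name
				VolumeSource: corev1.VolumeSource{
					ConfigMap: &corev1.ConfigMapVolumeSource{
						LocalObjectReference: corev1.LocalObjectReference{Name: configMapName},
					},
				},
			}},
			Containers: []corev1.Container{{
				Name:  "upgrader",
				Image: image,
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "upgrader-config",
					MountPath: "/etc/upgrader", // illustrative mount path
				}},
			}},
		},
	}
}
```

Mounting would let the pod resolve its own image mapping, at the cost of the controller no longer validating the lookup up front.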

+ if configMap.Data == nil {
+     return ctrl.Result{}, errors.New("upgrader config map is empty")
+ }

+ upgraderImage, ok := configMap.Data[nodeUpgrade.Spec.KubernetesVersion]
+ if !ok {
+     return ctrl.Result{}, fmt.Errorf("upgrader image corresponding to EKS Distro version %s not found in the config map", nodeUpgrade.Spec.KubernetesVersion)
+ }


if isControlPlane(node) {
if nodeUpgrade.Spec.FirstNodeToBeUpgraded {
- upgraderPod = upgrader.UpgradeFirstControlPlanePod(node.Name, defaultUpgraderImage, nodeUpgrade.Spec.KubernetesVersion, *nodeUpgrade.Spec.EtcdVersion)
+ upgraderPod = upgrader.UpgradeFirstControlPlanePod(node.Name, upgraderImage, nodeUpgrade.Spec.KubernetesVersion, *nodeUpgrade.Spec.EtcdVersion)
} else {
- upgraderPod = upgrader.UpgradeSecondaryControlPlanePod(node.Name, defaultUpgraderImage)
+ upgraderPod = upgrader.UpgradeSecondaryControlPlanePod(node.Name, upgraderImage)
}
} else {
- upgraderPod = upgrader.UpgradeWorkerPod(node.Name, defaultUpgraderImage)
+ upgraderPod = upgrader.UpgradeWorkerPod(node.Name, upgraderImage)
}

if err := remoteClient.Create(ctx, upgraderPod); err != nil {
39 changes: 25 additions & 14 deletions controllers/nodeupgrade_controller_test.go
@@ -26,13 +26,13 @@ func TestNodeUpgradeReconcilerReconcileFirstControlPlane(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

- cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
+ cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
nodeUpgrade.Spec.FirstNodeToBeUpgraded = true
nodeUpgrade.Spec.EtcdVersion = ptr.String("v3.5.9-eks-1-28-9")
node.Labels = map[string]string{
"node-role.kubernetes.io/control-plane": "true",
}
- client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()
+ client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil)

@@ -52,11 +52,11 @@ func TestNodeUpgradeReconcilerReconcileNextControlPlane(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

- cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
+ cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
node.Labels = map[string]string{
"node-role.kubernetes.io/control-plane": "true",
}
- client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()
+ client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil)

@@ -76,8 +76,8 @@ func TestNodeUpgradeReconcilerReconcileWorker(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

- cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
- client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()
+ cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
+ client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil)

@@ -97,8 +97,8 @@ func TestNodeUpgradeReconcilerReconcileCreateUpgraderPodState(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

- cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
- client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()
+ cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
+ client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil).Times(2)

@@ -158,8 +158,8 @@ func TestNodeUpgradeReconcilerReconcileDelete(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

- cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
- client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()
+ cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
+ client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil).Times(2)

@@ -189,8 +189,8 @@ func TestNodeUpgradeReconcilerReconcileDeleteUpgraderPodAlreadyDeleted(t *testin
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

- cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
- client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()
+ cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
+ client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil).Times(2)

@@ -217,12 +217,13 @@ func TestNodeUpgradeReconcilerReconcileDeleteUpgraderPodAlreadyDeleted(t *testin
g.Expect(err).To(MatchError("pods \"node01-node-upgrader\" not found"))
}

- func getObjectsForNodeUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *corev1.Node, *anywherev1.NodeUpgrade) {
+ func getObjectsForNodeUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *corev1.Node, *anywherev1.NodeUpgrade, *corev1.ConfigMap) {
cluster := generateCluster()
node := generateNode()
machine := generateMachine(cluster, node)
nodeUpgrade := generateNodeUpgrade(machine)
- return cluster, machine, node, nodeUpgrade
+ configMap := generateConfigMap()
+ return cluster, machine, node, nodeUpgrade, configMap
}

func nodeUpgradeRequest(nodeUpgrade *anywherev1.NodeUpgrade) reconcile.Request {
@@ -284,3 +285,13 @@ func generateCluster() *clusterv1.Cluster {
},
}
}

+ func generateConfigMap() *corev1.ConfigMap {
+     return &corev1.ConfigMap{
+         ObjectMeta: metav1.ObjectMeta{
+             Name:      "in-place-upgrade",
+             Namespace: "eksa-system",
+         },
+         Data: map[string]string{"v1.28.1": "test"},
+     }
+ }
24 changes: 21 additions & 3 deletions internal/test/cluster.go
@@ -47,11 +47,29 @@ func NewClusterSpec(opts ...ClusterSpecOpt) *cluster.Spec {
}
s.VersionsBundles = map[v1alpha1.KubernetesVersion]*cluster.VersionsBundle{
v1alpha1.Kube119: {
- VersionsBundle: &releasev1alpha1.VersionsBundle{},
- KubeDistro:     &cluster.KubeDistro{},
+ VersionsBundle: &releasev1alpha1.VersionsBundle{
+     EksD: releasev1alpha1.EksDRelease{
+         Name:           "kubernetes-1-19-eks-7",
+         EksDReleaseUrl: "embed:///testdata/release.yaml",
+         KubeVersion:    "1.19",
+     },
+ },
+ KubeDistro: &cluster.KubeDistro{},
},
}
+ s.Bundles = &releasev1alpha1.Bundles{
+     Spec: releasev1alpha1.BundlesSpec{
+         VersionsBundles: []releasev1alpha1.VersionsBundle{
+             {
+                 EksD: releasev1alpha1.EksDRelease{
+                     Name:           "kubernetes-1-19-eks-7",
+                     EksDReleaseUrl: "embed:///testdata/release.yaml",
+                     KubeVersion:    "1.19",
+                 },
+             },
+         },
+     },
+ }
- s.Bundles = &releasev1alpha1.Bundles{}
s.EKSARelease = EKSARelease()

for _, opt := range opts {
52 changes: 52 additions & 0 deletions pkg/clustermanager/cluster_manager.go
@@ -16,6 +16,8 @@
eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/integer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -151,6 +153,7 @@
GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error)
GetEksdRelease(ctx context.Context, name, namespace, kubeconfigFile string) (*eksdv1alpha1.Release, error)
ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error
+ GetConfigMap(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.ConfigMap, error)
}

type Networking interface {
@@ -1176,6 +1179,16 @@
if err != nil {
return fmt.Errorf("applying bundle spec: %v", err)
}

+ // We need to update this config map with the new upgrader images whenever we
+ // apply a new Bundles object to the cluster in order to support in-place upgrades.
+ cm, err := c.getUpgraderImagesFromBundle(ctx, cluster, clusterSpec)
+ if err != nil {
+     return fmt.Errorf("getting upgrader images from bundle: %v", err)
+ }

+ if err = c.clusterClient.Apply(ctx, cluster.KubeconfigFile, cm); err != nil {
+     return fmt.Errorf("applying upgrader images config map: %v", err)
+ }
return nil
}

@@ -1394,3 +1407,42 @@
func (c *ClusterManager) DeletePackageResources(ctx context.Context, managementCluster *types.Cluster, clusterName string) error {
return c.clusterClient.DeletePackageResources(ctx, managementCluster, clusterName)
}

+ func (c *ClusterManager) getUpgraderImagesFromBundle(ctx context.Context, cluster *types.Cluster, cl *cluster.Spec) (*corev1.ConfigMap, error) {
+     upgraderImages := make(map[string]string)
+     for _, versionBundle := range cl.Bundles.Spec.VersionsBundles {
+         eksD := versionBundle.EksD
+         eksdVersion := fmt.Sprintf("%s-eks-%s-%s", eksD.KubeVersion, eksD.ReleaseChannel, strings.Split(eksD.Name, "-")[4])
+         if _, ok := upgraderImages[eksdVersion]; !ok {
+             upgraderImages[eksdVersion] = versionBundle.Upgrader.Upgrader.URI
+         }
+     }
+
+     upgraderConfigMap, err := c.clusterClient.GetConfigMap(ctx, cluster.KubeconfigFile, constants.UpgraderConfigMapName, constants.EksaSystemNamespace)
+     if err != nil {
+         if executables.IsKubectlNotFoundError(err) {
+             return newUpgraderConfigMap(upgraderImages), nil
+         }
+         return nil, err
+     }

+     for version, image := range upgraderImages {
+         upgraderConfigMap.Data[version] = image
+     }
+
+     return upgraderConfigMap, nil
+ }

+ func newUpgraderConfigMap(m map[string]string) *corev1.ConfigMap {
+     return &corev1.ConfigMap{
+         TypeMeta: metav1.TypeMeta{
+             APIVersion: "v1",
+             Kind:       "ConfigMap",
+         },
+         ObjectMeta: metav1.ObjectMeta{
+             Name:      constants.UpgraderConfigMapName,
+             Namespace: constants.EksaSystemNamespace,
+         },
+         Data: m,
+     }
+ }
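For reference, a worked example of the ConfigMap key that getUpgraderImagesFromBundle derives above, under assumed bundle values (KubeVersion "1.28", ReleaseChannel "1-28", Name "kubernetes-1-28-eks-9" — illustrative, not from this PR):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed example values; real ones come from the Bundles manifest.
	kubeVersion := "1.28"
	releaseChannel := "1-28"
	name := "kubernetes-1-28-eks-9"

	// Mirrors the Sprintf above: strings.Split(name, "-")[4]
	// extracts the EKS Distro build number ("9").
	key := fmt.Sprintf("%s-eks-%s-%s", kubeVersion, releaseChannel, strings.Split(name, "-")[4])
	fmt.Println(key) // prints "1.28-eks-1-28-9"
}
```

Under these assumptions, the upgrader image URI is stored under that key, and the NodeUpgrade reconciler looks it up by the NodeUpgrade's KubernetesVersion.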