diff --git a/controllers/controlplaneupgrade_controller_test.go b/controllers/controlplaneupgrade_controller_test.go
index 0b8f7003fb77e..6fff6edde8f07 100644
--- a/controllers/controlplaneupgrade_controller_test.go
+++ b/controllers/controlplaneupgrade_controller_test.go
@@ -205,7 +205,7 @@ func getObjectsForCPUpgradeTest() (*clusterv1.Cluster, []*clusterv1.Machine, []*
 	machines := []*clusterv1.Machine{machine1, machine2}
 	nodes := []*corev1.Node{node1, node2}
 	nodeUpgrades := []*anywherev1.NodeUpgrade{nodeUpgrade1, nodeUpgrade2}
-	cpUpgrade := generateCPUpgrade(machines, cluster)
+	cpUpgrade := generateCPUpgrade(machines)
 	return cluster, machines, nodes, cpUpgrade, nodeUpgrades
 }

@@ -218,7 +218,7 @@ func cpUpgradeRequest(cpUpgrade *anywherev1.ControlPlaneUpgrade) reconcile.Reque
 	}
 }

-func generateCPUpgrade(machine []*clusterv1.Machine, cluster *clusterv1.Cluster) *anywherev1.ControlPlaneUpgrade {
+func generateCPUpgrade(machine []*clusterv1.Machine) *anywherev1.ControlPlaneUpgrade {
 	etcdVersion := "v1.28.3-eks-1-28-9"
 	return &anywherev1.ControlPlaneUpgrade{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/controllers/machinedeploymentupgrade_controller_test.go b/controllers/machinedeploymentupgrade_controller_test.go
index 6e4f9f6de1984..3aa5feb6bfda5 100644
--- a/controllers/machinedeploymentupgrade_controller_test.go
+++ b/controllers/machinedeploymentupgrade_controller_test.go
@@ -146,7 +146,7 @@ func getObjectsForMDUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *core
 	node := generateNode()
 	machine := generateMachine(cluster, node)
 	nodeUpgrade := generateNodeUpgrade(machine)
-	mdUpgrade := generateMDUpgrade(machine, cluster)
+	mdUpgrade := generateMDUpgrade(machine)
 	return cluster, machine, node, mdUpgrade, nodeUpgrade
 }

@@ -159,7 +159,7 @@ func mdUpgradeRequest(mdUpgrade *anywherev1.MachineDeploymentUpgrade) reconcile.
 	}
 }

-func generateMDUpgrade(machine *clusterv1.Machine, cluster *clusterv1.Cluster) *anywherev1.MachineDeploymentUpgrade {
+func generateMDUpgrade(machine *clusterv1.Machine) *anywherev1.MachineDeploymentUpgrade {
 	return &anywherev1.MachineDeploymentUpgrade{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "md-upgrade-request",
diff --git a/controllers/nodeupgrade_controller.go b/controllers/nodeupgrade_controller.go
index bdb1166651fc9..5a4d9ec24cf99 100644
--- a/controllers/nodeupgrade_controller.go
+++ b/controllers/nodeupgrade_controller.go
@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,10 +26,8 @@ import (
 )

 const (
-	// TODO(in-place): Get this image from the bundle instead of using the hardcoded one.
-	defaultUpgraderImage = "public.ecr.aws/t2p4l7v3/upgrader:eksdbase"
-	controlPlaneLabel    = "node-role.kubernetes.io/control-plane"
-	podDNEMessage        = "Upgrader pod does not exist"
+	controlPlaneLabel = "node-role.kubernetes.io/control-plane"
+	podDNEMessage     = "Upgrader pod does not exist"

 	// nodeUpgradeFinalizerName is the finalizer added to NodeUpgrade objects to handle deletion.
 	nodeUpgradeFinalizerName = "nodeupgrades.anywhere.eks.amazonaws.com/finalizer"
@@ -168,14 +167,23 @@ func (r *NodeUpgradeReconciler) reconcile(ctx context.Context, log logr.Logger,
 		return ctrl.Result{}, nil
 	}

+	configMap := &corev1.ConfigMap{}
+	if err := remoteClient.Get(ctx, types.NamespacedName{Name: constants.UpgraderConfigMapName, Namespace: constants.EksaSystemNamespace}, configMap); err != nil {
+		return ctrl.Result{}, err
+	}
+	if configMap.Data == nil {
+		return ctrl.Result{}, errors.New("upgrader config map is empty")
+	}
+	upgraderImage := configMap.Data[nodeUpgrade.Spec.KubernetesVersion]
+
 	if isControlPlane(node) {
 		if nodeUpgrade.Spec.FirstNodeToBeUpgraded {
-			upgraderPod = upgrader.UpgradeFirstControlPlanePod(node.Name, defaultUpgraderImage, nodeUpgrade.Spec.KubernetesVersion, *nodeUpgrade.Spec.EtcdVersion)
+			upgraderPod = upgrader.UpgradeFirstControlPlanePod(node.Name, upgraderImage, nodeUpgrade.Spec.KubernetesVersion, *nodeUpgrade.Spec.EtcdVersion)
 		} else {
-			upgraderPod = upgrader.UpgradeSecondaryControlPlanePod(node.Name, defaultUpgraderImage)
+			upgraderPod = upgrader.UpgradeSecondaryControlPlanePod(node.Name, upgraderImage)
 		}
 	} else {
-		upgraderPod = upgrader.UpgradeWorkerPod(node.Name, defaultUpgraderImage)
+		upgraderPod = upgrader.UpgradeWorkerPod(node.Name, upgraderImage)
 	}

 	if err := remoteClient.Create(ctx, upgraderPod); err != nil {
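The nodeupgrade_controller.go change above swaps the hardcoded defaultUpgraderImage for a per-version lookup in the in-place-upgrade ConfigMap, keyed by the NodeUpgrade's Kubernetes version. A minimal sketch of that keying scheme, with made-up version strings and image URIs (only the map shape and the lookup by Spec.KubernetesVersion come from the diff):

package main

import "fmt"

func main() {
	// Shape of the eksa-system/in-place-upgrade ConfigMap data:
	// EKS-D version string -> upgrader image. Entries are illustrative.
	data := map[string]string{
		"v1.27.8-eks-1-27-18": "example.registry/upgrader:v1.27.8-eks-1-27-18",
		"v1.28.4-eks-1-28-11": "example.registry/upgrader:v1.28.4-eks-1-28-11",
	}

	// The controller indexes the map directly with NodeUpgrade.Spec.KubernetesVersion.
	fmt.Println(data["v1.28.4-eks-1-28-11"])
}

Note that the new code only guards against a nil Data map: a version key that is simply absent yields an empty image string for the upgrader pod.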
nodeUpgradeFinalizerName = "nodeupgrades.anywhere.eks.amazonaws.com/finalizer" @@ -168,14 +167,23 @@ func (r *NodeUpgradeReconciler) reconcile(ctx context.Context, log logr.Logger, return ctrl.Result{}, nil } + configMap := &corev1.ConfigMap{} + if err := remoteClient.Get(ctx, types.NamespacedName{Name: constants.UpgraderConfigMapName, Namespace: constants.EksaSystemNamespace}, configMap); err != nil { + return ctrl.Result{}, err + } + if configMap.Data == nil { + return ctrl.Result{}, errors.New("upgrader config map is empty") + } + upgraderImage := configMap.Data[nodeUpgrade.Spec.KubernetesVersion] + if isControlPlane(node) { if nodeUpgrade.Spec.FirstNodeToBeUpgraded { - upgraderPod = upgrader.UpgradeFirstControlPlanePod(node.Name, defaultUpgraderImage, nodeUpgrade.Spec.KubernetesVersion, *nodeUpgrade.Spec.EtcdVersion) + upgraderPod = upgrader.UpgradeFirstControlPlanePod(node.Name, upgraderImage, nodeUpgrade.Spec.KubernetesVersion, *nodeUpgrade.Spec.EtcdVersion) } else { - upgraderPod = upgrader.UpgradeSecondaryControlPlanePod(node.Name, defaultUpgraderImage) + upgraderPod = upgrader.UpgradeSecondaryControlPlanePod(node.Name, upgraderImage) } } else { - upgraderPod = upgrader.UpgradeWorkerPod(node.Name, defaultUpgraderImage) + upgraderPod = upgrader.UpgradeWorkerPod(node.Name, upgraderImage) } if err := remoteClient.Create(ctx, upgraderPod); err != nil { diff --git a/controllers/nodeupgrade_controller_test.go b/controllers/nodeupgrade_controller_test.go index 98ff008e7face..4e7a5b63ca758 100644 --- a/controllers/nodeupgrade_controller_test.go +++ b/controllers/nodeupgrade_controller_test.go @@ -26,13 +26,13 @@ func TestNodeUpgradeReconcilerReconcileFirstControlPlane(t *testing.T) { ctrl := gomock.NewController(t) clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl) - cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest() + cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest() nodeUpgrade.Spec.FirstNodeToBeUpgraded = true nodeUpgrade.Spec.EtcdVersion = ptr.String("v3.5.9-eks-1-28-9") node.Labels = map[string]string{ "node-role.kubernetes.io/control-plane": "true", } - client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build() + client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build() clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil) @@ -52,11 +52,11 @@ func TestNodeUpgradeReconcilerReconcileNextControlPlane(t *testing.T) { ctrl := gomock.NewController(t) clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl) - cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest() + cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest() node.Labels = map[string]string{ "node-role.kubernetes.io/control-plane": "true", } - client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build() + client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build() clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil) @@ -76,8 +76,8 @@ func TestNodeUpgradeReconcilerReconcileWorker(t *testing.T) { ctrl := gomock.NewController(t) clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl) - cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest() - client := 
diff --git a/internal/test/cluster.go b/internal/test/cluster.go
index a930f513497ba..d440920e277f3 100644
--- a/internal/test/cluster.go
+++ b/internal/test/cluster.go
@@ -47,11 +47,29 @@ func NewClusterSpec(opts ...ClusterSpecOpt) *cluster.Spec {
 	}
 	s.VersionsBundles = map[v1alpha1.KubernetesVersion]*cluster.VersionsBundle{
 		v1alpha1.Kube119: {
-			VersionsBundle: &releasev1alpha1.VersionsBundle{},
-			KubeDistro:     &cluster.KubeDistro{},
+			VersionsBundle: &releasev1alpha1.VersionsBundle{
+				EksD: releasev1alpha1.EksDRelease{
+					Name:           "kubernetes-1-19-eks-7",
+					EksDReleaseUrl: "embed:///testdata/release.yaml",
+					KubeVersion:    "1.19",
+				},
+			},
+			KubeDistro: &cluster.KubeDistro{},
+		},
+	}
+	s.Bundles = &releasev1alpha1.Bundles{
+		Spec: releasev1alpha1.BundlesSpec{
+			VersionsBundles: []releasev1alpha1.VersionsBundle{
+				{
+					EksD: releasev1alpha1.EksDRelease{
+						Name:           "kubernetes-1-19-eks-7",
+						EksDReleaseUrl: "embed:///testdata/release.yaml",
+						KubeVersion:    "1.19",
+					},
+				},
+			},
 		},
 	}
-	s.Bundles = &releasev1alpha1.Bundles{}
 	s.EKSARelease = EKSARelease()
 	for _, opt := range opts {
"test"}, + } +} diff --git a/internal/test/cluster.go b/internal/test/cluster.go index a930f513497ba..d440920e277f3 100644 --- a/internal/test/cluster.go +++ b/internal/test/cluster.go @@ -47,11 +47,29 @@ func NewClusterSpec(opts ...ClusterSpecOpt) *cluster.Spec { } s.VersionsBundles = map[v1alpha1.KubernetesVersion]*cluster.VersionsBundle{ v1alpha1.Kube119: { - VersionsBundle: &releasev1alpha1.VersionsBundle{}, - KubeDistro: &cluster.KubeDistro{}, + VersionsBundle: &releasev1alpha1.VersionsBundle{ + EksD: releasev1alpha1.EksDRelease{ + Name: "kubernetes-1-19-eks-7", + EksDReleaseUrl: "embed:///testdata/release.yaml", + KubeVersion: "1.19", + }, + }, + KubeDistro: &cluster.KubeDistro{}, + }, + } + s.Bundles = &releasev1alpha1.Bundles{ + Spec: releasev1alpha1.BundlesSpec{ + VersionsBundles: []releasev1alpha1.VersionsBundle{ + { + EksD: releasev1alpha1.EksDRelease{ + Name: "kubernetes-1-19-eks-7", + EksDReleaseUrl: "embed:///testdata/release.yaml", + KubeVersion: "1.19", + }, + }, + }, }, } - s.Bundles = &releasev1alpha1.Bundles{} s.EKSARelease = EKSARelease() for _, opt := range opts { diff --git a/pkg/clustermanager/cluster_manager.go b/pkg/clustermanager/cluster_manager.go index 9c0e2b19ee2e4..d34b8c8fba1be 100644 --- a/pkg/clustermanager/cluster_manager.go +++ b/pkg/clustermanager/cluster_manager.go @@ -16,6 +16,8 @@ import ( eksdv1alpha1 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1" "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/integer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -151,6 +153,7 @@ type ClusterClient interface { GetMachineDeployment(ctx context.Context, workerNodeGroupName string, opts ...executables.KubectlOpt) (*clusterv1.MachineDeployment, error) GetEksdRelease(ctx context.Context, name, namespace, kubeconfigFile string) (*eksdv1alpha1.Release, error) ListObjects(ctx context.Context, resourceType, namespace, kubeconfig string, list kubernetes.ObjectList) error + GetConfigMap(ctx context.Context, kubeconfigFile, name, namespace string) (*corev1.ConfigMap, error) } type Networking interface { @@ -1176,6 +1179,16 @@ func (c *ClusterManager) ApplyBundles(ctx context.Context, clusterSpec *cluster. if err != nil { return fmt.Errorf("applying bundle spec: %v", err) } + + // We need to update this config map with the new upgrader images whenever we + // apply a new Bundles object to the cluster in order to support in-place upgrades. 
diff --git a/pkg/clustermanager/cluster_manager_test.go b/pkg/clustermanager/cluster_manager_test.go
index 9f35826273fb4..33bb65bcc308e 100644
--- a/pkg/clustermanager/cluster_manager_test.go
+++ b/pkg/clustermanager/cluster_manager_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/golang/mock/gomock"
 	. "github.com/onsi/gomega"
 	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -1772,11 +1773,27 @@ func TestClusterManagerCreateEKSAResourcesSuccess(t *testing.T) {
 	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
 	machineConfigs := []providers.MachineConfig{}

+	configMap := &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      constants.UpgraderConfigMapName,
+			Namespace: constants.EksaSystemNamespace,
+		},
+		Data: map[string]string{
+			"v1.28": "test-image",
+		},
+	}
+
 	c, m := newClusterManager(t)
 	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, gomock.Any())
 	// ApplyKubeSpecFromBytes is called twice. Once for Bundles and again for EKSARelease.
 	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, gomock.Any()).MaxTimes(2)
+	m.client.EXPECT().GetConfigMap(ctx, tt.cluster.KubeconfigFile, gomock.Any(), gomock.Any()).Return(configMap, nil)
+	m.client.EXPECT().Apply(ctx, tt.cluster.KubeconfigFile, gomock.Any())
 	m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, tt.cluster, gomock.Any(), gomock.Any()).MaxTimes(2)
 	tt.Expect(c.CreateEKSAResources(ctx, tt.cluster, tt.clusterSpec, datacenterConfig, machineConfigs)).To(Succeed())
 	_, ok := datacenterConfig.GetAnnotations()["anywhere.eks.amazonaws.com/paused"]
@@ -1841,6 +1858,20 @@ func TestClusterManagerCreateEKSAResourcesFailureEKSARelease(t *testing.T) {
 	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
 	machineConfigs := []providers.MachineConfig{}

+	configMap := &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      constants.UpgraderConfigMapName,
+			Namespace: constants.EksaSystemNamespace,
+		},
+		Data: map[string]string{
+			"v1.28": "test-image",
+		},
+	}
+
 	mockCtrl := gomock.NewController(t)
 	m := &clusterManagerMocks{
 		writer: mockswriter.NewMockFileWriter(mockCtrl),
@@ -1859,12 +1890,102 @@
 	c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents)

 	m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil)
+	m.client.EXPECT().GetConfigMap(ctx, tt.cluster.KubeconfigFile, gomock.Any(), gomock.Any()).Return(configMap, nil)
+	m.client.EXPECT().Apply(ctx, tt.cluster.KubeconfigFile, gomock.Any())
 	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any()).Return(nil)
 	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any()).Return(nil)
 	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any()).Return(errors.New(""))
 	tt.Expect(c.CreateEKSAResources(ctx, tt.cluster, tt.clusterSpec, datacenterConfig, machineConfigs)).NotTo(Succeed())
 }

+func TestClusterManagerCreateEKSAResourcesNewUpgraderConfigMap(t *testing.T) {
+	features.ClearCache()
+	ctx := context.Background()
+	tt := newTest(t)
+	tt.clusterSpec.Cluster.Namespace = "test_namespace"
+
+	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
+	machineConfigs := []providers.MachineConfig{}
+
+	mockCtrl := gomock.NewController(t)
+	m := &clusterManagerMocks{
+		writer:             mockswriter.NewMockFileWriter(mockCtrl),
+		networking:         mocksmanager.NewMockNetworking(mockCtrl),
+		awsIamAuth:         mocksmanager.NewMockAwsIamAuth(mockCtrl),
+		client:             mocksmanager.NewMockClusterClient(mockCtrl),
+		provider:           mocksprovider.NewMockProvider(mockCtrl),
+		diagnosticsFactory: mocksdiagnostics.NewMockDiagnosticBundleFactory(mockCtrl),
+		diagnosticsBundle:  mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl),
+		eksaComponents:     mocksmanager.NewMockEKSAComponents(mockCtrl),
+	}
+	client := clustermanager.NewRetrierClient(m.client, retrier.NewWithMaxRetries(1, 1))
+	fakeClient := test.NewFakeKubeClient()
+	cf := mocksmanager.NewMockClientFactory(mockCtrl)
+	cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes()
+	c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents)
+
+	m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil)
+	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, gomock.Any())
+	// ApplyKubeSpecFromBytes is called twice. Once for Bundles and again for EKSARelease.
+	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, tt.cluster, gomock.Any()).MaxTimes(2)
+	m.client.EXPECT().GetConfigMap(ctx, tt.cluster.KubeconfigFile, gomock.Any(), gomock.Any()).Return(nil, errors.New("NotFound"))
+	m.client.EXPECT().Apply(ctx, tt.cluster.KubeconfigFile, gomock.Any())
+	m.client.EXPECT().ApplyKubeSpecFromBytesWithNamespace(ctx, tt.cluster, gomock.Any(), gomock.Any()).MaxTimes(2)
+	tt.Expect(c.CreateEKSAResources(ctx, tt.cluster, tt.clusterSpec, datacenterConfig, machineConfigs)).To(Succeed())
+	_, ok := datacenterConfig.GetAnnotations()["anywhere.eks.amazonaws.com/paused"]
+	tt.Expect(ok).To(BeTrue())
+	_, ok = tt.clusterSpec.Cluster.GetAnnotations()["anywhere.eks.amazonaws.com/paused"]
+	tt.Expect(ok).To(BeTrue())
+}
+
+func TestClusterManagerCreateEKSAResourcesFailureApplyUpgraderConfigMap(t *testing.T) {
+	features.ClearCache()
+	ctx := context.Background()
+	tt := newTest(t)
+	tt.clusterSpec.Cluster.Namespace = "test_namespace"
+
+	datacenterConfig := &v1alpha1.VSphereDatacenterConfig{}
+	machineConfigs := []providers.MachineConfig{}
+
+	configMap := &corev1.ConfigMap{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "ConfigMap",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      constants.UpgraderConfigMapName,
+			Namespace: constants.EksaSystemNamespace,
+		},
+		Data: map[string]string{
+			"v1.28": "test-image",
+		},
+	}
+
+	mockCtrl := gomock.NewController(t)
+	m := &clusterManagerMocks{
+		writer:             mockswriter.NewMockFileWriter(mockCtrl),
+		networking:         mocksmanager.NewMockNetworking(mockCtrl),
+		awsIamAuth:         mocksmanager.NewMockAwsIamAuth(mockCtrl),
+		client:             mocksmanager.NewMockClusterClient(mockCtrl),
+		provider:           mocksprovider.NewMockProvider(mockCtrl),
+		diagnosticsFactory: mocksdiagnostics.NewMockDiagnosticBundleFactory(mockCtrl),
+		diagnosticsBundle:  mocksdiagnostics.NewMockDiagnosticBundle(mockCtrl),
+		eksaComponents:     mocksmanager.NewMockEKSAComponents(mockCtrl),
+	}
+	client := clustermanager.NewRetrierClient(m.client, retrier.NewWithMaxRetries(1, 1))
+	fakeClient := test.NewFakeKubeClient()
+	cf := mocksmanager.NewMockClientFactory(mockCtrl)
+	cf.EXPECT().BuildClientFromKubeconfig("").Return(fakeClient, nil).AnyTimes()
+	c := clustermanager.New(cf, client, m.networking, m.writer, m.diagnosticsFactory, m.awsIamAuth, m.eksaComponents)
+
+	m.client.EXPECT().CreateNamespaceIfNotPresent(ctx, gomock.Any(), tt.clusterSpec.Cluster.Namespace).Return(nil)
+	m.client.EXPECT().GetConfigMap(ctx, tt.cluster.KubeconfigFile, gomock.Any(), gomock.Any()).Return(configMap, nil)
+	m.client.EXPECT().Apply(ctx, tt.cluster.KubeconfigFile, gomock.Any()).Return(errors.New(""))
+	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any()).Return(nil)
+	m.client.EXPECT().ApplyKubeSpecFromBytes(ctx, gomock.Any(), gomock.Any()).Return(nil)
+	tt.Expect(c.CreateEKSAResources(ctx, tt.cluster, tt.clusterSpec, datacenterConfig, machineConfigs)).NotTo(Succeed())
+}
+
 func expectedMachineHealthCheck(unhealthyMachineTimeout, nodeStartupTimeout time.Duration) []byte {
 	healthCheck := fmt.Sprintf(`apiVersion: cluster.x-k8s.io/v1beta1
 kind: MachineHealthCheck
diff --git a/pkg/clustermanager/mocks/client_and_networking.go b/pkg/clustermanager/mocks/client_and_networking.go
index ce856b73bf7a8..a4277e6716ec5 100644
--- a/pkg/clustermanager/mocks/client_and_networking.go
+++ b/pkg/clustermanager/mocks/client_and_networking.go
@@ -19,6 +19,7 @@ import (
 	v1alpha11 "github.com/aws/eks-distro-build-tooling/release/api/v1alpha1"
 	logr "github.com/go-logr/logr"
 	gomock "github.com/golang/mock/gomock"
+	v1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	v1beta10 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
@@ -295,6 +296,21 @@ func (mr *MockClusterClientMockRecorder) GetClusters(arg0, arg1 interface{}) *go
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusters", reflect.TypeOf((*MockClusterClient)(nil).GetClusters), arg0, arg1)
 }

+// GetConfigMap mocks base method.
+func (m *MockClusterClient) GetConfigMap(arg0 context.Context, arg1, arg2, arg3 string) (*v1.ConfigMap, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetConfigMap", arg0, arg1, arg2, arg3)
+	ret0, _ := ret[0].(*v1.ConfigMap)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetConfigMap indicates an expected call of GetConfigMap.
+func (mr *MockClusterClientMockRecorder) GetConfigMap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigMap", reflect.TypeOf((*MockClusterClient)(nil).GetConfigMap), arg0, arg1, arg2, arg3)
+}
+
 // GetEksaAWSIamConfig mocks base method.
 func (m *MockClusterClient) GetEksaAWSIamConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.AWSIamConfig, error) {
 	m.ctrl.T.Helper()
"github.com/aws/eks-distro-build-tooling/release/api/v1alpha1" logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" v1beta10 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" @@ -295,6 +296,21 @@ func (mr *MockClusterClientMockRecorder) GetClusters(arg0, arg1 interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusters", reflect.TypeOf((*MockClusterClient)(nil).GetClusters), arg0, arg1) } +// GetConfigMap mocks base method. +func (m *MockClusterClient) GetConfigMap(arg0 context.Context, arg1, arg2, arg3 string) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfigMap", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetConfigMap indicates an expected call of GetConfigMap. +func (mr *MockClusterClientMockRecorder) GetConfigMap(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigMap", reflect.TypeOf((*MockClusterClient)(nil).GetConfigMap), arg0, arg1, arg2, arg3) +} + // GetEksaAWSIamConfig mocks base method. func (m *MockClusterClient) GetEksaAWSIamConfig(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.AWSIamConfig, error) { m.ctrl.T.Helper() diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 605aeff0cfcfc..c1333cb8f3a24 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -46,6 +46,8 @@ const ( NutanixCredentialsName = "nutanix-credentials" EksaLicenseName = "eksa-license" EksaPackagesName = "eksa-packages" + // UpgraderConfigMapName is the name of config map that stores the upgrader images. + UpgraderConfigMapName = "in-place-upgrade" CloudstackAnnotationSuffix = "cloudstack.anywhere.eks.amazonaws.com/v1alpha1" diff --git a/pkg/executables/kubectl.go b/pkg/executables/kubectl.go index 9e3a612c741cc..4e350c771536f 100644 --- a/pkg/executables/kubectl.go +++ b/pkg/executables/kubectl.go @@ -2179,7 +2179,8 @@ func isKubectlAlreadyExistsError(err error) bool { const notFoundErrorMessageSubString = "NotFound" -func isKubectlNotFoundError(err error) bool { +// IsKubectlNotFoundError returns true if the kubectl call returned the NotFound error. +func IsKubectlNotFoundError(err error) bool { return err != nil && strings.Contains(err.Error(), notFoundErrorMessageSubString) } @@ -2285,7 +2286,7 @@ func (k *Kubectl) Delete(ctx context.Context, resourceType, kubeconfig string, o params := deleteParams(resourceType, kubeconfig, o) _, err := k.Execute(ctx, params...) - if isKubectlNotFoundError(err) { + if IsKubectlNotFoundError(err) { return newNotFoundErrorForTypeAndName(resourceType, o.Name) }