Backport #6674: Prevent bare metal machine config references from changing to existing machine configs #6686

Merged: 1 commit, Sep 18, 2023
4 changes: 2 additions & 2 deletions pkg/collection/set.go
@@ -48,8 +48,8 @@ func (s Set[T]) ToSlice() []T {
return keys
}

// MapSet allows to map a collection to a Set using a closure to extract
// the values of type T.
// MapSet converts c to a new set. f is used to extract the value that represents each
// element of c.
func MapSet[G any, T comparable](c []G, f func(G) T) Set[T] {
s := NewSet[T]()
for _, element := range c {
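As a usage sketch (illustrative, not part of this diff): the revised MapSet extracts a comparable key from each element, e.g. building a set of worker node group names the way upgrade.go does below. The workerNodeGroup type here is a hypothetical stand-in for v1alpha1.WorkerNodeGroupConfiguration; only MapSet and Contains from pkg/collection are exercised.

```go
package main

import (
	"fmt"

	"github.com/aws/eks-anywhere/pkg/collection"
)

// workerNodeGroup stands in for v1alpha1.WorkerNodeGroupConfiguration.
type workerNodeGroup struct{ Name string }

func main() {
	groups := []workerNodeGroup{{Name: "md-0"}, {Name: "md-1"}}

	// Build a set of group names, mirroring how upgrade.go builds
	// currentWNGsSet from WorkerNodeGroupConfigurations.
	names := collection.MapSet(groups, func(g workerNodeGroup) string {
		return g.Name
	})

	fmt.Println(names.Contains("md-0")) // true
	fmt.Println(names.Contains("md-2")) // false
}
```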
146 changes: 83 additions & 63 deletions pkg/providers/tinkerbell/upgrade.go
@@ -9,6 +9,7 @@ import (
rufiov1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"

"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
@@ -230,69 +231,117 @@ func (p *Provider) RunPostControlPlaneUpgrade(ctx context.Context, oldClusterSpe

// ValidateNewSpec satisfies the Provider interface.
func (p *Provider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
prevSpec, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, clusterSpec.Cluster.Name)
desiredClstrSpec := clusterSpec

currentClstr, err := p.providerKubectlClient.GetEksaCluster(ctx, cluster, desiredClstrSpec.Cluster.Name)
if err != nil {
return err
}

prevDatacenterConfig, err := p.providerKubectlClient.GetEksaTinkerbellDatacenterConfig(ctx,
prevSpec.Spec.DatacenterRef.Name, cluster.KubeconfigFile, prevSpec.Namespace)
currentDCName := currentClstr.Spec.DatacenterRef.Name
currentDCCfg, err := p.providerKubectlClient.GetEksaTinkerbellDatacenterConfig(ctx, currentDCName, cluster.KubeconfigFile, currentClstr.Namespace)
if err != nil {
return err
}

oSpec := prevDatacenterConfig.Spec
nSpec := p.datacenterConfig.Spec
currentWNGs := currentClstr.Spec.WorkerNodeGroupConfigurations
desiredWNGs := desiredClstrSpec.Cluster.Spec.WorkerNodeGroupConfigurations

err = p.validateMachineCfgsImmutability(ctx, cluster, currentClstr, desiredClstrSpec, currentWNGs, desiredWNGs)
if err != nil {
return err
}

prevMachineConfigRefs := machineRefSliceToMap(prevSpec.MachineConfigRefs())
desiredDCCfgSpec := p.datacenterConfig.Spec

desiredWorkerNodeGroups := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations
currentWorkerNodeGroups := collection.ToMap(prevSpec.Spec.WorkerNodeGroupConfigurations,
func(v v1alpha1.WorkerNodeGroupConfiguration) string { return v.Name })
if desiredDCCfgSpec.TinkerbellIP != currentDCCfg.Spec.TinkerbellIP {
return fmt.Errorf("spec.tinkerbellIP is immutable; previous = %s, new = %s",
currentDCCfg.Spec.TinkerbellIP, desiredDCCfgSpec.TinkerbellIP)
}

// Gather machine config references for new worker node groups so we can avoid immutability
// checks.
newWorkerNodeGroupsRefs := collection.NewSet[string]()
for _, wng := range desiredWorkerNodeGroups {
if _, exists := currentWorkerNodeGroups[wng.Name]; !exists {
newWorkerNodeGroupsRefs.Add(wng.MachineGroupRef.Name)
// for any operation other than k8s version change, osImageURL and hookImagesURLPath are immutable
if currentClstr.Spec.KubernetesVersion == desiredClstrSpec.Cluster.Spec.KubernetesVersion {
if desiredDCCfgSpec.OSImageURL != currentDCCfg.Spec.OSImageURL {
return fmt.Errorf("spec.OSImageURL is immutable. Previous value %s, New value %s", currentDCCfg.Spec.OSImageURL, desiredDCCfgSpec.OSImageURL)
}
Comment on lines +264 to 266 (Contributor Author):

In release-0.17 we validate the OSImageURL is unchanged (see removed l.287). In mainline we removed this check to accommodate upgrades to Ubuntu 22.04.

if desiredDCCfgSpec.HookImagesURLPath != currentDCCfg.Spec.HookImagesURLPath {
return fmt.Errorf("spec.hookImagesURLPath is immutable. previoius = %s, new = %s",
currentDCCfg.Spec.HookImagesURLPath, desiredDCCfgSpec.HookImagesURLPath)
}
}

return nil
}

func (p *Provider) validateMachineCfgsImmutability(ctx context.Context, clstr *types.Cluster, currentClstr *v1alpha1.Cluster, desiredClstrSpec *cluster.Spec, currentWNGs, desiredWNGs []v1alpha1.WorkerNodeGroupConfiguration) error {
currentCPMachineRef := currentClstr.Spec.ControlPlaneConfiguration.MachineGroupRef
desiredCPMachineRef := desiredClstrSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef
if !currentCPMachineRef.Equal(desiredCPMachineRef) {
return errors.New("control plane machine config reference is immutable")
}

for _, machineConfigRef := range clusterSpec.Cluster.MachineConfigRefs() {
machineConfig, ok := p.machineConfigs[machineConfigRef.Name]
err := validateRefsUnchanged(currentWNGs, desiredWNGs)
if err != nil {
return err
}

currentMachineCfgRefsMap := p.machineConfigs

currentWNGsSet := collection.MapSet(currentWNGs, func(v v1alpha1.WorkerNodeGroupConfiguration) string {
return v.Name
})

// newWNGs contains the set of worker node group names specified in the desired spec that are new.
newWNGs := collection.NewSet[string]()
for _, wng := range desiredWNGs {
if !currentWNGsSet.Contains(wng.Name) {
newWNGs.Add(wng.MachineGroupRef.Name)
}
}

for _, machineCfgRef := range desiredClstrSpec.Cluster.MachineConfigRefs() {
machineCfg, ok := currentMachineCfgRefsMap[machineCfgRef.Name]
if !ok {
return fmt.Errorf("cannot find machine config %s in tinkerbell provider machine configs", machineConfigRef.Name)
return fmt.Errorf("cannot find machine config %s in tinkerbell provider machine configs", machineCfgRef.Name)
}

// If the machien config reference is for a new worker node group don't bother with
// If the machine config reference is for a new worker node group don't bother with
// immutability checks as we want users to be able to add worker node groups.
if !newWorkerNodeGroupsRefs.Contains(machineConfigRef.Name) {
if _, ok = prevMachineConfigRefs[machineConfig.Name]; !ok {
return fmt.Errorf("cannot add or remove MachineConfigs as part of upgrade")
if !newWNGs.Contains(machineCfgRef.Name) {
if _, has := currentMachineCfgRefsMap[machineCfg.Name]; !has {
return fmt.Errorf("cannot change machine config references")
}
err = p.validateMachineConfigImmutability(ctx, cluster, machineConfig, clusterSpec)
err := p.validateMachineCfg(ctx, clstr, machineCfg, desiredClstrSpec)
if err != nil {
return err
}
}
}

if nSpec.TinkerbellIP != oSpec.TinkerbellIP {
return fmt.Errorf("spec.TinkerbellIP is immutable. Previous value %s, New value %s", oSpec.TinkerbellIP, nSpec.TinkerbellIP)
}
return nil
}

func validateRefsUnchanged(current, desired []v1alpha1.WorkerNodeGroupConfiguration) error {
lookup := collection.ToMap(desired, func(v v1alpha1.WorkerNodeGroupConfiguration) string {
return v.Name
})

var errs []error

// for any operation other than k8s version change, osImageURL and hookImageURL are immutable
if prevSpec.Spec.KubernetesVersion == clusterSpec.Cluster.Spec.KubernetesVersion {
if nSpec.OSImageURL != oSpec.OSImageURL {
return fmt.Errorf("spec.OSImageURL is immutable. Previous value %s, New value %s", oSpec.OSImageURL, nSpec.OSImageURL)
// For every current worker node group that still exists in the desired config, ensure the
// machine config is still the same.
for _, curr := range current {
desi, exists := lookup[curr.Name]
if !exists {
continue
}
if nSpec.HookImagesURLPath != oSpec.HookImagesURLPath {
return fmt.Errorf("spec.HookImagesURLPath is immutable. Previous value %s, New value %s", oSpec.HookImagesURLPath, nSpec.HookImagesURLPath)

if !curr.MachineGroupRef.Equal(desi.MachineGroupRef) {
errs = append(errs, fmt.Errorf("%v: worker node group machine config reference is immutable", curr.Name))
}
}

return nil
return kerrors.NewAggregate(errs)
}

func (p *Provider) UpgradeNeeded(_ context.Context, _, _ *cluster.Spec, _ *types.Cluster) (bool, error) {
@@ -325,28 +374,7 @@ func (p *Provider) isScaleUpDown(oldCluster *v1alpha1.Cluster, newCluster *v1alp
return false
}

/* func (p *Provider) isScaleUpDown(currentSpec *cluster.Spec, newSpec *cluster.Spec) bool {
if currentSpec.Cluster.Spec.ControlPlaneConfiguration.Count != newSpec.Cluster.Spec.ControlPlaneConfiguration.Count {
return true
}

workerNodeGroupMap := make(map[string]*v1alpha1.WorkerNodeGroupConfiguration)
for _, workerNodeGroupConfiguration := range currentSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
workerNodeGroupMap[workerNodeGroupConfiguration.Name] = &workerNodeGroupConfiguration
}

for _, nodeGroupNewSpec := range newSpec.Cluster.Spec.WorkerNodeGroupConfigurations {
if workerNodeGrpOldSpec, ok := workerNodeGroupMap[nodeGroupNewSpec.Name]; ok {
if *nodeGroupNewSpec.Count != *workerNodeGrpOldSpec.Count {
return true
}
}
}

return false
} */

func (p *Provider) validateMachineConfigImmutability(ctx context.Context, cluster *types.Cluster, newConfig *v1alpha1.TinkerbellMachineConfig, clusterSpec *cluster.Spec) error {
func (p *Provider) validateMachineCfg(ctx context.Context, cluster *types.Cluster, newConfig *v1alpha1.TinkerbellMachineConfig, clusterSpec *cluster.Spec) error {
prevMachineConfig, err := p.providerKubectlClient.GetEksaTinkerbellMachineConfig(ctx, newConfig.Name, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace)
if err != nil {
return err
@@ -371,14 +399,6 @@ func (p *Provider) validateMachineConfigImmutability(ctx context.Context, cluste
return nil
}

func machineRefSliceToMap(machineRefs []v1alpha1.Ref) map[string]v1alpha1.Ref {
refMap := make(map[string]v1alpha1.Ref, len(machineRefs))
for _, ref := range machineRefs {
refMap[ref.Name] = ref
}
return refMap
}

// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *Provider) PreCoreComponentsUpgrade(
ctx context.Context,
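A standalone sketch (illustrative, not part of this diff) of the kerrors.NewAggregate semantics that validateRefsUnchanged relies on: an empty error slice yields a nil error, and multiple errors are flattened into one, so a single validation pass reports every changed worker node group reference at once.

```go
package main

import (
	"errors"
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// No violations: NewAggregate returns nil, so callers can use the
	// usual `if err != nil` check without special-casing the empty slice.
	var errs []error
	fmt.Println(kerrors.NewAggregate(errs)) // <nil>

	// Two violations: the aggregate joins both messages, naming every
	// offending worker node group (names here are illustrative).
	errs = append(errs,
		errors.New("md-0: worker node group machine config reference is immutable"),
		errors.New("md-1: worker node group machine config reference is immutable"),
	)
	fmt.Println(kerrors.NewAggregate(errs))
}
```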
86 changes: 83 additions & 3 deletions pkg/providers/tinkerbell/upgrade_test.go
@@ -799,6 +799,86 @@ func TestProvider_ValidateNewSpec_ChangeWorkerNodeGroupMachineRef(t *testing.T)
docker, helm, kubectl)
provider.stackInstaller = stackInstaller

kubectl.EXPECT().
GetEksaCluster(ctx, desiredClusterSpec.ManagementCluster,
desiredClusterSpec.Cluster.Spec.ManagementCluster.Name).
Return(currentClusterSpec.Cluster, nil)
kubectl.EXPECT().
GetEksaTinkerbellDatacenterConfig(ctx, currentClusterSpec.Cluster.Spec.DatacenterRef.Name,
cluster.KubeconfigFile, currentClusterSpec.Cluster.Namespace).
Return(datacenterConfig, nil)

err := provider.ValidateNewSpec(ctx, desiredClusterSpec.ManagementCluster, desiredClusterSpec)
if err == nil || !strings.Contains(err.Error(), "worker node group machine config reference is immutable") {
t.Fatalf("Expected error containing 'worker node group machine config reference is immutable' but received: %v", err)
}
}

func TestProvider_ValidateNewSpec_ChangeControlPlaneMachineRefToExisting(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
currentClusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()

cluster := &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
currentClusterSpec.ManagementCluster = cluster

desiredClusterSpec := currentClusterSpec.DeepCopy()

// Change the control plane machine config reference to an existing machine config.
desiredClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "test-md"

provider := newTinkerbellProvider(datacenterConfig, machineConfigs, currentClusterSpec.Cluster, writer,
docker, helm, kubectl)
provider.stackInstaller = stackInstaller

kubectl.EXPECT().
GetEksaCluster(ctx, desiredClusterSpec.ManagementCluster,
desiredClusterSpec.Cluster.Spec.ManagementCluster.Name).
Return(currentClusterSpec.Cluster, nil)
kubectl.EXPECT().
GetEksaTinkerbellDatacenterConfig(ctx, currentClusterSpec.Cluster.Spec.DatacenterRef.Name,
cluster.KubeconfigFile, currentClusterSpec.Cluster.Namespace).
Return(datacenterConfig, nil)

err := provider.ValidateNewSpec(ctx, desiredClusterSpec.ManagementCluster, desiredClusterSpec)
if err == nil || !strings.Contains(err.Error(), "control plane machine config reference is immutable") {
t.Fatalf("Expected error containing 'control plane machine config reference is immutable' but received: %v", err)
}
}

func TestProvider_ValidateNewSpec_ChangeWorkerNodeGroupMachineRefToExisting(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
currentClusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()

cluster := &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
currentClusterSpec.ManagementCluster = cluster

desiredClusterSpec := currentClusterSpec.DeepCopy()

// Change an existing worker node group's machine config reference to an existing machine config.
desiredClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = "test-cp"

provider := newTinkerbellProvider(datacenterConfig, machineConfigs, currentClusterSpec.Cluster, writer,
docker, helm, kubectl)
provider.stackInstaller = stackInstaller

kubectl.EXPECT().
GetEksaCluster(ctx, desiredClusterSpec.ManagementCluster,
desiredClusterSpec.Cluster.Spec.ManagementCluster.Name).
@@ -820,8 +900,8 @@ func TestProvider_ValidateNewSpec_ChangeWorkerNodeGroupMachineRef(t *testing.T)
AnyTimes()

err := provider.ValidateNewSpec(ctx, desiredClusterSpec.ManagementCluster, desiredClusterSpec)
if err == nil || !strings.Contains(err.Error(), "cannot add or remove MachineConfigs") {
t.Fatalf("Expected error containing 'cannot add or remove MachineConfigs' but received: %v", err)
if err == nil || !strings.Contains(err.Error(), "worker node group machine config reference is immutable") {
t.Fatalf("Expected error containing 'worker node group machine config reference is immutable' but received: %v", err)
}
}

@@ -845,7 +925,7 @@ func TestProvider_ValidateNewSpec_NewWorkerNodeGroup(t *testing.T) {

// Add an extra worker node group to the desired configuration with its associated machine
// config. The machine configs are plumbed in via the Tinkerbell provider constructor func.
newMachineCfgName := "additiona-machine-config"
newMachineCfgName := "additional-machine-config"
newWorkerNodeGroupName := "additional-worker-node-group"
desiredWorkerNodeGroups := &desiredClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations
*desiredWorkerNodeGroups = append(*desiredWorkerNodeGroups, v1alpha1.WorkerNodeGroupConfiguration{
@@ -201,7 +201,7 @@ func TestPreflightValidationsTinkerbell(t *testing.T) {
workerResponse: nil,
nodeResponse: nil,
crdResponse: nil,
wantErr: composeError("spec.TinkerbellIP is immutable. Previous value 4.5.6.7, New value 1.2.3.4"),
wantErr: composeError("spec.tinkerbellIP is immutable; previous = 4.5.6.7, new = 1.2.3.4"),
modifyDatacenterFunc: func(s *anywherev1.TinkerbellDatacenterConfig) {
s.Spec.TinkerbellIP = "4.5.6.7"
},