Skip to content

Commit

Permalink
Prevent bare metal machine config references from changing
Browse files Browse the repository at this point in the history
In bare metal clusters users shouldn't be able to change machine
configs. However, EKS-A incorrectly allowed users to change the machine
config references to existing machine configs.
  • Loading branch information
chrisdoherty4 committed Sep 11, 2023
1 parent 5126575 commit 8ac7392
Show file tree
Hide file tree
Showing 2 changed files with 134 additions and 12 deletions.
62 changes: 52 additions & 10 deletions pkg/providers/tinkerbell/upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
rufiov1 "github.com/tinkerbell/rufio/api/v1alpha1"
tinkv1alpha1 "github.com/tinkerbell/tink/pkg/apis/core/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"

"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/cluster"
Expand Down Expand Up @@ -244,7 +245,21 @@ func (p *Provider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster,
oSpec := prevDatacenterConfig.Spec
nSpec := p.datacenterConfig.Spec

prevMachineConfigRefs := machineRefSliceToMap(prevSpec.MachineConfigRefs())
err = validateControlPlaneMachineConfigRefUnchanged(prevSpec, clusterSpec.Cluster)
if err != nil {
return err
}

err = validateWorkerNodeGroupMachineConfigRefsUnchanged(prevSpec.Spec.WorkerNodeGroupConfigurations,
clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations)
if err != nil {
return err
}

// Validate worker node group machine configs are the same.
prevMachineConfigRefs := collection.ToMap(prevSpec.MachineConfigRefs(), func(v v1alpha1.Ref) string {
return v.Name
})

desiredWorkerNodeGroups := clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations
currentWorkerNodeGroups := collection.ToMap(prevSpec.Spec.WorkerNodeGroupConfigurations,
Expand Down Expand Up @@ -279,7 +294,7 @@ func (p *Provider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster,
}

if nSpec.TinkerbellIP != oSpec.TinkerbellIP {
return fmt.Errorf("spec.TinkerbellIP is immutable. Previous value %s, New value %s", oSpec.TinkerbellIP, nSpec.TinkerbellIP)
return fmt.Errorf("spec.tinkerbellIP is immutable; previous = %s, new = %s", oSpec.TinkerbellIP, nSpec.TinkerbellIP)
}

// for any operation other than k8s version change, hookImageURL is immutable
Expand All @@ -292,6 +307,41 @@ func (p *Provider) ValidateNewSpec(ctx context.Context, cluster *types.Cluster,
return nil
}

// validateControlPlaneMachineConfigRefUnchanged verifies the desired cluster retains the
// current cluster's control plane machine config reference. On bare metal, machine config
// references are immutable, so any change is rejected.
func validateControlPlaneMachineConfigRefUnchanged(current, desired *v1alpha1.Cluster) error {
	currentRef := current.Spec.ControlPlaneConfiguration.MachineGroupRef
	desiredRef := desired.Spec.ControlPlaneConfiguration.MachineGroupRef

	if currentRef.Equal(desiredRef) {
		return nil
	}

	return errors.New("control plane machine config reference is immutable")
}

// validateWorkerNodeGroupMachineConfigRefsUnchanged ensures that, for every worker node
// group present in both the current and desired specs, the machine config reference has
// not changed. Groups that only exist in one of the two specs are skipped here —
// presumably add/remove validation happens elsewhere (TODO confirm against caller).
// Violations are aggregated so the user sees every offending group in one error.
func validateWorkerNodeGroupMachineConfigRefsUnchanged(current, desired []v1alpha1.WorkerNodeGroupConfiguration) error {
	lookup := collection.ToMap(desired, func(v v1alpha1.WorkerNodeGroupConfiguration) string {
		return v.Name
	})

	var errs []error

	// For every current worker node group that still exists in the desired config, ensure the
	// machine config reference is still the same.
	for _, curr := range current {
		desi, exists := lookup[curr.Name]
		if !exists {
			continue
		}

		if !curr.MachineGroupRef.Equal(desi.MachineGroupRef) {
			// curr.Name is a string, so %s is the idiomatic verb (output identical to %v).
			errs = append(errs,
				fmt.Errorf("%s: worker node group machine config reference is immutable", curr.Name))
		}
	}

	// NewAggregate returns nil when errs is empty, so the happy path needs no special case.
	return kerrors.NewAggregate(errs)
}

func (p *Provider) UpgradeNeeded(_ context.Context, _, _ *cluster.Spec, _ *types.Cluster) (bool, error) {
// TODO: Figure out if something is needed here
return false, nil
Expand Down Expand Up @@ -368,14 +418,6 @@ func (p *Provider) validateMachineConfigImmutability(ctx context.Context, cluste
return nil
}

// machineRefSliceToMap indexes a slice of machine config references by their Name field.
// Later entries with a duplicate name overwrite earlier ones.
func machineRefSliceToMap(machineRefs []v1alpha1.Ref) map[string]v1alpha1.Ref {
	byName := make(map[string]v1alpha1.Ref, len(machineRefs))
	for i := range machineRefs {
		byName[machineRefs[i].Name] = machineRefs[i]
	}
	return byName
}

// PreCoreComponentsUpgrade satisfies the Provider interface.
func (p *Provider) PreCoreComponentsUpgrade(
ctx context.Context,
Expand Down
84 changes: 82 additions & 2 deletions pkg/providers/tinkerbell/upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -799,6 +799,86 @@ func TestProvider_ValidateNewSpec_ChangeWorkerNodeGroupMachineRef(t *testing.T)
docker, helm, kubectl)
provider.stackInstaller = stackInstaller

kubectl.EXPECT().
GetEksaCluster(ctx, desiredClusterSpec.ManagementCluster,
desiredClusterSpec.Cluster.Spec.ManagementCluster.Name).
Return(currentClusterSpec.Cluster, nil)
kubectl.EXPECT().
GetEksaTinkerbellDatacenterConfig(ctx, currentClusterSpec.Cluster.Spec.DatacenterRef.Name,
cluster.KubeconfigFile, currentClusterSpec.Cluster.Namespace).
Return(datacenterConfig, nil)

err := provider.ValidateNewSpec(ctx, desiredClusterSpec.ManagementCluster, desiredClusterSpec)
if err == nil || !strings.Contains(err.Error(), "cannot add or remove MachineConfigs") {
t.Fatalf("Expected error containing 'cannot add or remove MachineConfigs' but received: %v", err)
}
}

// TestProvider_ValidateNewSpec_ChangeControlPlaneMachineRefToExisting verifies that
// repointing the control plane's machine config reference at a different, already-existing
// machine config is rejected as an immutable-field change.
func TestProvider_ValidateNewSpec_ChangeControlPlaneMachineRefToExisting(t *testing.T) {
	clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
	mockCtrl := gomock.NewController(t)
	currentClusterSpec := givenClusterSpec(t, clusterSpecManifest)
	datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
	machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
	docker := stackmocks.NewMockDocker(mockCtrl)
	helm := stackmocks.NewMockHelm(mockCtrl)
	kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
	stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
	writer := filewritermocks.NewMockFileWriter(mockCtrl)
	ctx := context.Background()

	cluster := &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
	currentClusterSpec.ManagementCluster = cluster

	desiredClusterSpec := currentClusterSpec.DeepCopy()

	// Change the control plane machine config reference to a different existing machine config.
	desiredClusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "test-md"

	provider := newTinkerbellProvider(datacenterConfig, machineConfigs, currentClusterSpec.Cluster, writer,
		docker, helm, kubectl)
	provider.stackInstaller = stackInstaller

	kubectl.EXPECT().
		GetEksaCluster(ctx, desiredClusterSpec.ManagementCluster,
			desiredClusterSpec.Cluster.Spec.ManagementCluster.Name).
		Return(currentClusterSpec.Cluster, nil)
	kubectl.EXPECT().
		GetEksaTinkerbellDatacenterConfig(ctx, currentClusterSpec.Cluster.Spec.DatacenterRef.Name,
			cluster.KubeconfigFile, currentClusterSpec.Cluster.Namespace).
		Return(datacenterConfig, nil)

	err := provider.ValidateNewSpec(ctx, desiredClusterSpec.ManagementCluster, desiredClusterSpec)
	if err == nil || !strings.Contains(err.Error(), "control plane machine config reference is immutable") {
		t.Fatalf("Expected error containing 'control plane machine config reference is immutable' but received: %v", err)
	}
}

func TestProvider_ValidateNewSpec_ChangeWorkerNodeGroupMachineRefToExisting(t *testing.T) {
clusterSpecManifest := "cluster_tinkerbell_stacked_etcd.yaml"
mockCtrl := gomock.NewController(t)
currentClusterSpec := givenClusterSpec(t, clusterSpecManifest)
datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest)
machineConfigs := givenMachineConfigs(t, clusterSpecManifest)
docker := stackmocks.NewMockDocker(mockCtrl)
helm := stackmocks.NewMockHelm(mockCtrl)
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
stackInstaller := stackmocks.NewMockStackInstaller(mockCtrl)
writer := filewritermocks.NewMockFileWriter(mockCtrl)
ctx := context.Background()

cluster := &types.Cluster{Name: "test", KubeconfigFile: "kubeconfig-file"}
currentClusterSpec.ManagementCluster = cluster

desiredClusterSpec := currentClusterSpec.DeepCopy()

// Change an existing worker node groups machine config reference to an existing machine config.
desiredClusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = "test-cp"

provider := newTinkerbellProvider(datacenterConfig, machineConfigs, currentClusterSpec.Cluster, writer,
docker, helm, kubectl)
provider.stackInstaller = stackInstaller

kubectl.EXPECT().
GetEksaCluster(ctx, desiredClusterSpec.ManagementCluster,
desiredClusterSpec.Cluster.Spec.ManagementCluster.Name).
Expand All @@ -820,8 +900,8 @@ func TestProvider_ValidateNewSpec_ChangeWorkerNodeGroupMachineRef(t *testing.T)
AnyTimes()

err := provider.ValidateNewSpec(ctx, desiredClusterSpec.ManagementCluster, desiredClusterSpec)
if err == nil || !strings.Contains(err.Error(), "cannot add or remove MachineConfigs") {
t.Fatalf("Expected error containing 'cannot add or remove MachineConfigs' but received: %v", err)
if err == nil || !strings.Contains(err.Error(), "worker node group machine config reference is immutable") {
t.Fatalf("Expected error containing 'worker node group machine config reference is immutable' but received: %v", err)
}
}

Expand Down

0 comments on commit 8ac7392

Please sign in to comment.