diff --git a/api/v1beta2/awscluster_types.go b/api/v1beta2/awscluster_types.go index 1df6c53b89..8ca1409109 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -169,11 +169,10 @@ type Bastion struct { type LoadBalancerType string var ( - LoadBalancerTypeClassic = LoadBalancerType("classic") - LoadBalancerTypeELB = LoadBalancerType("elb") - LoadBalancerTypeALB = LoadBalancerType("alb") - LoadBalancerTypeNLB = LoadBalancerType("nlb") - LoadBalancerTypeDisabled = LoadBalancerType("disabled") + LoadBalancerTypeClassic = LoadBalancerType("classic") + LoadBalancerTypeELB = LoadBalancerType("elb") + LoadBalancerTypeALB = LoadBalancerType("alb") + LoadBalancerTypeNLB = LoadBalancerType("nlb") ) // AWSLoadBalancerSpec defines the desired state of an AWS load balancer. @@ -232,7 +231,7 @@ type AWSLoadBalancerSpec struct { // LoadBalancerType sets the type for a load balancer. The default type is classic. // +kubebuilder:default=classic - // +kubebuilder:validation:Enum:=classic;elb;alb;nlb;disabled + // +kubebuilder:validation:Enum:=classic;elb;alb;nlb LoadBalancerType LoadBalancerType `json:"loadBalancerType,omitempty"` // DisableHostsRewrite disabled the hair pinning issue solution that adds the NLB's address as 127.0.0.1 to the hosts diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index 4e1a2dbb12..bbc3eb6a8f 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -298,49 +298,5 @@ func (r *AWSCluster) validateControlPlaneLBs() field.ErrorList { } } - if r.Spec.ControlPlaneLoadBalancer.LoadBalancerType == LoadBalancerTypeDisabled { - if r.Spec.ControlPlaneLoadBalancer.Name != nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "name"), r.Spec.ControlPlaneLoadBalancer.Name, "cannot configure a name if the LoadBalancer reconciliation is disabled")) - } - - if r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing { - allErrs = 
append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "crossZoneLoadBalancing"), r.Spec.ControlPlaneLoadBalancer.CrossZoneLoadBalancing, "cross-zone load balancing cannot be set if the LoadBalancer reconciliation is disabled")) - } - - if len(r.Spec.ControlPlaneLoadBalancer.Subnets) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "subnets"), r.Spec.ControlPlaneLoadBalancer.Subnets, "subnets cannot be set if the LoadBalancer reconciliation is disabled")) - } - - if r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol != nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "healthCheckProtocol"), r.Spec.ControlPlaneLoadBalancer.HealthCheckProtocol, "healthcheck protocol cannot be set if the LoadBalancer reconciliation is disabled")) - } - - if len(r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalSecurityGroups"), r.Spec.ControlPlaneLoadBalancer.AdditionalSecurityGroups, "additional Security Groups cannot be set if the LoadBalancer reconciliation is disabled")) - } - - if len(r.Spec.ControlPlaneLoadBalancer.AdditionalListeners) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "additionalListeners"), r.Spec.ControlPlaneLoadBalancer.AdditionalListeners, "cannot set additional listeners if the LoadBalancer reconciliation is disabled")) - } - - if len(r.Spec.ControlPlaneLoadBalancer.IngressRules) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "ingress rules cannot be set if the LoadBalancer reconciliation is disabled")) - } - - if r.Spec.ControlPlaneLoadBalancer.PreserveClientIP { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", 
"preserveClientIP"), r.Spec.ControlPlaneLoadBalancer.PreserveClientIP, "cannot preserve client IP if the LoadBalancer reconciliation is disabled")) - } - - if r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "disableHostsRewrite"), r.Spec.ControlPlaneLoadBalancer.DisableHostsRewrite, "cannot disable hosts rewrite if the LoadBalancer reconciliation is disabled")) - } - } - - for _, rule := range r.Spec.ControlPlaneLoadBalancer.IngressRules { - if (rule.CidrBlocks != nil || rule.IPv6CidrBlocks != nil) && (rule.SourceSecurityGroupIDs != nil || rule.SourceSecurityGroupRoles != nil) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneLoadBalancer", "ingressRules"), r.Spec.ControlPlaneLoadBalancer.IngressRules, "CIDR blocks and security group IDs or security group roles cannot be used together")) - } - } - return allErrs } diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index 85342552c6..d0883a61ff 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -26,7 +26,6 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/component-base/featuregate/testing" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" @@ -51,126 +50,6 @@ func TestAWSClusterValidateCreate(t *testing.T) { wantErr bool expect func(g *WithT, res *AWSLoadBalancerSpec) }{ - { - name: "No options are allowed when LoadBalancer is disabled (name)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - LoadBalancerType: LoadBalancerTypeDisabled, - Name: ptr.To("name"), - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (crossZoneLoadBalancing)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - CrossZoneLoadBalancing: true, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (subnets)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - Subnets: []string{"foo", "bar"}, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (healthCheckProtocol)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - HealthCheckProtocol: &ELBProtocolTCP, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (additionalSecurityGroups)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - AdditionalSecurityGroups: []string{"foo", "bar"}, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (additionalListeners)", - cluster: 
&AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - AdditionalListeners: []AdditionalListenerSpec{ - { - Port: 6443, - Protocol: ELBProtocolTCP, - }, - }, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (ingressRules)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - IngressRules: []IngressRule{ - { - Description: "ingress rule", - Protocol: SecurityGroupProtocolTCP, - FromPort: 6443, - ToPort: 6443, - }, - }, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (disableHostsRewrite)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - DisableHostsRewrite: true, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, - { - name: "No options are allowed when LoadBalancer is disabled (preserveClientIP)", - cluster: &AWSCluster{ - Spec: AWSClusterSpec{ - ControlPlaneLoadBalancer: &AWSLoadBalancerSpec{ - PreserveClientIP: true, - LoadBalancerType: LoadBalancerTypeDisabled, - }, - }, - }, - wantErr: true, - }, // The SSHKeyName tests were moved to sshkeyname_test.go { name: "Supported schemes are 'internet-facing, Internet-facing, internal, or nil', rest will be rejected", diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index bfbb96c77a..9cd1870a99 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -125,9 +125,6 @@ const ( LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. 
WaitForDNSNameReason = "WaitForDNSName" - // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed - // Load Balancer, such as an external Control Plane provider. - WaitForExternalControlPlaneEndpointReason = "WaitForExternalControlPlaneEndpoint" // WaitForDNSNameResolveReason used while waiting for DNS name to resolve. WaitForDNSNameResolveReason = "WaitForDNSNameResolve" // LoadBalancerFailedReason used when an error occurs during load balancer reconciliation. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index a74b9c7e82..80aaf648b0 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -1110,7 +1110,6 @@ spec: - elb - alb - nlb - - disabled type: string name: description: Name sets the name of the classic ELB load balancer. @@ -1692,7 +1691,6 @@ spec: - elb - alb - nlb - - disabled type: string name: description: Name sets the name of the classic ELB load balancer. 
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index df369e0c2d..f9b4564dfd 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -709,7 +709,6 @@ spec: - elb - alb - nlb - - disabled type: string name: description: Name sets the name of the classic ELB load @@ -1321,7 +1320,6 @@ spec: - elb - alb - nlb - - disabled type: string name: description: Name sets the name of the classic ELB load diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 33946b3057..754cb96fd9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -119,14 +119,6 @@ rules: - get - list - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - '*' - verbs: - - get - - list - - watch - apiGroups: - controlplane.cluster.x-k8s.io resources: diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index 13db38000a..a3e0368d03 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -266,45 +266,6 @@ func (r *AWSClusterReconciler) reconcileDelete(ctx context.Context, clusterScope return nil } -func (r *AWSClusterReconciler) reconcileLoadBalancer(clusterScope *scope.ClusterScope, awsCluster *infrav1.AWSCluster) (*time.Duration, error) { - retryAfterDuration := 15 * time.Second - if clusterScope.AWSCluster.Spec.ControlPlaneLoadBalancer.LoadBalancerType == infrav1.LoadBalancerTypeDisabled { - clusterScope.Debug("load balancer reconciliation shifted to external provider, checking external endpoint") - - return r.checkForExternalControlPlaneLoadBalancer(clusterScope, awsCluster), nil - } - - elbService := r.getELBService(clusterScope) - - if err := elbService.ReconcileLoadbalancers(); err != nil { - clusterScope.Error(err, "failed to reconcile load balancer") - 
conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error()) - return nil, err - } - - if awsCluster.Status.Network.APIServerELB.DNSName == "" { - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "") - clusterScope.Info("Waiting on API server ELB DNS name") - return &retryAfterDuration, nil - } - - clusterScope.Debug("Looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName) - if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil { - clusterScope.Error(err, "failed to get IP address for dns name", "dns", awsCluster.Status.Network.APIServerELB.DNSName) - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "") - clusterScope.Info("Waiting on API server ELB DNS name to resolve") - return &retryAfterDuration, nil - } - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) - - awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ - Host: awsCluster.Status.Network.APIServerELB.DNSName, - Port: clusterScope.APIServerPort(), - } - - return nil, nil -} - func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) (reconcile.Result, error) { clusterScope.Info("Reconciling AWSCluster") @@ -319,6 +280,7 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) } ec2Service := r.getEC2Service(clusterScope) + elbService := r.getELBService(clusterScope) networkSvc := r.getNetworkService(*clusterScope) sgService := r.getSecurityGroupService(*clusterScope) s3Service := s3.NewService(clusterScope) @@ -348,10 +310,10 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) } } - if requeueAfter, err := 
r.reconcileLoadBalancer(clusterScope, awsCluster); err != nil { + if err := elbService.ReconcileLoadbalancers(); err != nil { + clusterScope.Error(err, "failed to reconcile load balancer") + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error()) return reconcile.Result{}, err - } else if requeueAfter != nil { - return reconcile.Result{RequeueAfter: *requeueAfter}, err } if err := s3Service.ReconcileBucket(); err != nil { @@ -359,6 +321,26 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name) } + if awsCluster.Status.Network.APIServerELB.DNSName == "" { + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "") + clusterScope.Info("Waiting on API server ELB DNS name") + return reconcile.Result{RequeueAfter: 15 * time.Second}, nil + } + + clusterScope.Debug("looking up IP address for DNS", "dns", awsCluster.Status.Network.APIServerELB.DNSName) + if _, err := net.LookupIP(awsCluster.Status.Network.APIServerELB.DNSName); err != nil { + clusterScope.Error(err, "failed to get IP address for dns name", "dns", awsCluster.Status.Network.APIServerELB.DNSName) + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameResolveReason, clusterv1.ConditionSeverityInfo, "") + clusterScope.Info("Waiting on API server ELB DNS name to resolve") + return reconcile.Result{RequeueAfter: 15 * time.Second}, nil + } + conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + + awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + Host: awsCluster.Status.Network.APIServerELB.DNSName, + Port: clusterScope.APIServerPort(), + } + for _, subnet := range 
clusterScope.Subnets().FilterPrivate() { found := false for _, az := range awsCluster.Status.Network.APIServerELB.AvailabilityZones { @@ -465,29 +447,3 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(_ context.Con } } } - -func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterScope *scope.ClusterScope, awsCluster *infrav1.AWSCluster) *time.Duration { - requeueAfterPeriod := 15 * time.Second - - switch { - case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0: - clusterScope.Info("AWSCluster control plane endpoint is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") - - return &requeueAfterPeriod - case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0: - clusterScope.Info("AWSCluster control plane endpoint host is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") - - return &requeueAfterPeriod - case awsCluster.Spec.ControlPlaneEndpoint.Port == 0: - clusterScope.Info("AWSCluster control plane endpoint port is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") - - return &requeueAfterPeriod - default: - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) - - return nil - } -} diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go index cb74ddacd1..2b6a6f7285 100644 --- a/controllers/awscluster_controller_test.go +++ b/controllers/awscluster_controller_test.go @@ -30,7 +30,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" 
"sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -67,118 +66,6 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { teardown := func() { mockCtrl.Finish() } - t.Run("Should wait for external Control Plane endpoint when LoadBalancer is disabled, and eventually succeed when patched", func(t *testing.T) { - g := NewWithT(t) - mockCtrl = gomock.NewController(t) - ec2Mock := mocks.NewMockEC2API(mockCtrl) - expect := func(m *mocks.MockEC2APIMockRecorder) { - // First iteration, when the AWS Cluster is missing a valid Control Plane Endpoint - mockedVPCCallsForExistingVPCAndSubnets(m) - mockedCreateSGCalls(false, "vpc-exists", m) - mockedDescribeInstanceCall(m) - // Second iteration: the AWS Cluster object has been patched, - // thus a valid Control Plane Endpoint has been provided - mockedVPCCallsForExistingVPCAndSubnets(m) - mockedCreateSGCalls(false, "vpc-exists", m) - mockedDescribeInstanceCall(m) - } - expect(ec2Mock.EXPECT()) - - setup(t) - controllerIdentity := createControllerIdentity(g) - ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5))) - g.Expect(err).To(BeNil()) - // Creating the AWS cluster with a disabled Load Balancer: - // no ALB, ELB, or NLB specified, the AWS cluster must consistently be reported - // waiting for the control Plane endpoint. 
- awsCluster := getAWSCluster("test", ns.Name) - awsCluster.Spec.ControlPlaneLoadBalancer = &infrav1.AWSLoadBalancerSpec{ - LoadBalancerType: infrav1.LoadBalancerTypeDisabled, - } - - g.Expect(testEnv.Create(ctx, &awsCluster)).To(Succeed()) - - defer teardown() - defer t.Cleanup(func() { - g.Expect(testEnv.Cleanup(ctx, &awsCluster, controllerIdentity, ns)).To(Succeed()) - }) - - cs, err := getClusterScope(awsCluster) - g.Expect(err).To(BeNil()) - networkSvc := network.NewService(cs) - networkSvc.EC2Client = ec2Mock - reconciler.networkServiceFactory = func(clusterScope scope.ClusterScope) services.NetworkInterface { - return networkSvc - } - - ec2Svc := ec2Service.NewService(cs) - ec2Svc.EC2Client = ec2Mock - reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface { - return ec2Svc - } - testSecurityGroupRoles := []infrav1.SecurityGroupRole{ - infrav1.SecurityGroupBastion, - infrav1.SecurityGroupAPIServerLB, - infrav1.SecurityGroupLB, - infrav1.SecurityGroupControlPlane, - infrav1.SecurityGroupNode, - } - sgSvc := securitygroup.NewService(cs, testSecurityGroupRoles) - sgSvc.EC2Client = ec2Mock - - reconciler.securityGroupFactory = func(clusterScope scope.ClusterScope) services.SecurityGroupInterface { - return sgSvc - } - cs.SetSubnets([]infrav1.SubnetSpec{ - { - ID: "subnet-2", - AvailabilityZone: "us-east-1c", - IsPublic: true, - CidrBlock: "10.0.11.0/24", - }, - { - ID: "subnet-1", - AvailabilityZone: "us-east-1a", - CidrBlock: "10.0.10.0/24", - IsPublic: false, - }, - }) - - _, err = reconciler.reconcileNormal(cs) - g.Expect(err).To(BeNil()) - - cluster := &infrav1.AWSCluster{} - g.Expect(testEnv.Get(ctx, client.ObjectKey{Name: cs.AWSCluster.Name, Namespace: cs.AWSCluster.Namespace}, cluster)).ToNot(HaveOccurred()) - g.Expect(cluster.Spec.ControlPlaneEndpoint.Host).To(BeEmpty()) - g.Expect(cluster.Spec.ControlPlaneEndpoint.Port).To(BeZero()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: 
infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, - }) - // Mimicking an external operator patching the cluster with an already provisioned Load Balancer: - // this could be done by a human who provisioned a LB, or by a Control Plane provider. - g.Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { - if err = testEnv.Get(ctx, client.ObjectKey{Name: cs.AWSCluster.Name, Namespace: cs.AWSCluster.Namespace}, cs.AWSCluster); err != nil { - return err - } - - cs.AWSCluster.Spec.ControlPlaneEndpoint.Host = "10.0.10.1" - cs.AWSCluster.Spec.ControlPlaneEndpoint.Port = 6443 - - return testEnv.Update(ctx, cs.AWSCluster) - })).To(Succeed()) - // Executing back a second reconciliation: - // the AWS Cluster should be ready with no LoadBalancer false condition. - _, err = reconciler.reconcileNormal(cs) - g.Expect(err).To(BeNil()) - g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - }) - }) t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC", func(t *testing.T) { g := NewWithT(t) mockCtrl = gomock.NewController(t) diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 7ef74fe8c5..ced22d20ef 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ 
-32,7 +32,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -61,7 +60,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/controllers/external" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -202,16 +200,10 @@ func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) infrav1.SetDefaults_AWSMachineSpec(&awsMachine.Spec) - cp, err := r.getControlPlane(ctx, log, cluster) - if err != nil { - return ctrl.Result{}, err - } - // Create the machine scope machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: r.Client, Cluster: cluster, - ControlPlane: cp, Machine: machine, InfraCluster: infraCluster, AWSMachine: awsMachine, @@ -1225,22 +1217,3 @@ func (r *AWSMachineReconciler) ensureInstanceMetadataOptions(ec2svc services.EC2 return ec2svc.ModifyInstanceMetadataOptions(instance.ID, machine.Spec.InstanceMetadataOptions) } - -// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch - -func (r *AWSMachineReconciler) getControlPlane(ctx context.Context, log *logger.Logger, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) { - var ns string - - if ns = cluster.Spec.ControlPlaneRef.Namespace; ns == "" { - ns = cluster.Namespace - } - - controlPlane, err := external.Get(ctx, r.Client, cluster.Spec.ControlPlaneRef, ns) - if err != nil { - log.Error(err, "unable to get ControlPlane referenced in the given cluster", "cluster", fmt.Sprintf("%s/%s", cluster.Namespace, cluster.Name)) - - return nil, err - } - - return controlPlane, nil -} diff --git 
a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index 733d6ce9e9..01122cad0e 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -30,7 +30,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -418,7 +417,6 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s InfrastructureReady: true, }, }, - ControlPlane: &unstructured.Unstructured{}, Machine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test", diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index dd444c5275..d377583c85 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -33,7 +33,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -53,7 +52,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" ) @@ -131,7 +129,6 @@ func TestAWSMachineReconciler(t *testing.T) { }, }, InfraCluster: cs, - ControlPlane: &unstructured.Unstructured{}, AWSMachine: awsMachine, }, ) @@ -160,7 +157,6 @@ func TestAWSMachineReconciler(t *testing.T) { InfrastructureReady: true, }, }, - ControlPlane: &unstructured.Unstructured{}, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", @@ -394,7 
+390,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStatePending))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) @@ -414,7 +410,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateRunning))) g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue()) - g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{ {conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionTrue}, }) @@ -435,7 +431,7 @@ func TestAWSMachineReconciler(t *testing.T) { secretSvc.EXPECT().Create(gomock.Any(), gomock.Any()).Return("test", int32(1), nil).Times(1) _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring("EC2 instance state is undefined")) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state is undefined")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("InstanceUnhandledState"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"NewAWSMachineState\" is undefined"))) expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionUnknown}}) @@ -576,7 +572,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) 
g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopping))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) @@ -592,7 +588,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopped))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) @@ -608,7 +604,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateRunning))) g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue()) - g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) + g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) }) }) t.Run("deleting the AWSMachine manually", func(t *testing.T) { @@ -633,7 +629,7 @@ func TestAWSMachineReconciler(t *testing.T) { instance.State = infrav1.InstanceStateShuttingDown _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) + 
g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination"))) }) @@ -648,7 +644,7 @@ func TestAWSMachineReconciler(t *testing.T) { instance.State = infrav1.InstanceStateTerminated _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) - g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) + g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"terminated\" is unexpected"))) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason}}) @@ -2501,10 +2497,6 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi ns := "testns" - cp := &kubeadmv1beta1.KubeadmControlPlane{} - cp.SetName("capi-cp-test-1") - cp.SetNamespace(ns) - ownerCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: ns}, Spec: clusterv1.ClusterSpec{ @@ -2514,12 +2506,6 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi Namespace: ns, APIVersion: infrav1.GroupVersion.String(), }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Namespace: cp.Namespace, - Name: cp.Name, - APIVersion: kubeadmv1beta1.GroupVersion.String(), - }, }, Status: clusterv1.ClusterStatus{ InfrastructureReady: true, @@ -2639,7 +2625,7 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi }, } - fakeClient := fake.NewClientBuilder().WithObjects(ownerCluster, awsCluster, ownerMachine, awsMachine, controllerIdentity, secret, 
cp).WithStatusSubresource(awsCluster, awsMachine).Build() + fakeClient := fake.NewClientBuilder().WithObjects(ownerCluster, awsCluster, ownerMachine, awsMachine, controllerIdentity, secret).WithStatusSubresource(awsCluster, awsMachine).Build() recorder := record.NewFakeRecorder(10) reconciler := &AWSMachineReconciler{ diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 98f392a7b1..4adf3e779d 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -29,7 +29,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" ) var ( @@ -46,7 +45,6 @@ func TestMain(m *testing.M) { func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(kubeadmv1beta1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index fcb735c22e..ee98c78292 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -23,7 +23,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" @@ -44,7 +43,6 @@ import ( type MachineScopeParams struct { Client client.Client Logger *logger.Logger - ControlPlane *unstructured.Unstructured Cluster *clusterv1.Cluster Machine *clusterv1.Machine InfraCluster EC2Scope @@ -69,9 +67,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { if params.InfraCluster == nil { return nil, errors.New("aws cluster is required when creating a MachineScope") } - if params.ControlPlane == nil { - return nil, errors.New("cluster control plane is required when 
creating a MachineScope") - } if params.Logger == nil { log := klog.Background() @@ -83,10 +78,10 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { return nil, errors.Wrap(err, "failed to init patch helper") } return &MachineScope{ - Logger: *params.Logger, - client: params.Client, - patchHelper: helper, - ControlPlane: params.ControlPlane, + Logger: *params.Logger, + client: params.Client, + patchHelper: helper, + Cluster: params.Cluster, Machine: params.Machine, InfraCluster: params.InfraCluster, @@ -102,7 +97,6 @@ type MachineScope struct { Cluster *clusterv1.Cluster Machine *clusterv1.Machine - ControlPlane *unstructured.Unstructured InfraCluster EC2Scope AWSMachine *infrav1.AWSMachine } @@ -377,10 +371,6 @@ func (m *MachineScope) IsEKSManaged() bool { return m.InfraCluster.InfraCluster().GetObjectKind().GroupVersionKind().Kind == ekscontrolplanev1.AWSManagedControlPlaneKind } -func (m *MachineScope) IsControlPlaneExternallyManaged() bool { - return util.IsExternalManagedControlPlane(m.ControlPlane) -} - // IsExternallyManaged checks if the machine is externally managed. 
func (m *MachineScope) IsExternallyManaged() bool { return annotations.IsExternallyManaged(m.InfraCluster.InfraCluster()) diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index 9cad370f35..f34790d061 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -22,7 +22,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -133,8 +132,7 @@ func setupMachineScope() (*MachineScope, error) { InfraCluster: &ClusterScope{ AWSCluster: awsCluster, }, - ControlPlane: &unstructured.Unstructured{}, - AWSMachine: awsMachine, + AWSMachine: awsMachine, }, ) } @@ -225,10 +223,9 @@ func TestGetRawBootstrapDataWithFormat(t *testing.T) { machineScope, err := NewMachineScope( MachineScopeParams{ - Client: client, - Machine: machine, - Cluster: cluster, - ControlPlane: &unstructured.Unstructured{}, + Client: client, + Machine: machine, + Cluster: cluster, InfraCluster: &ClusterScope{ AWSCluster: awsCluster, }, diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 1fbcce3c90..ad9a746a8d 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -181,7 +181,7 @@ func (s *Service) CreateInstance(scope *scope.MachineScope, userData []byte, use } input.SubnetID = subnetID - if !scope.IsControlPlaneExternallyManaged() && !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" { + if !scope.IsExternallyManaged() && !scope.IsEKSManaged() && s.scope.Network().APIServerELB.DNSName == "" { record.Eventf(s.scope.InfraCluster(), "FailedCreateInstance", "Failed to run controlplane, APIServer ELB not available") return nil, awserrors.NewFailedDependency("failed to run controlplane, APIServer ELB not available") diff --git 
a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index 9ccf5a67ba..f68e4a5f5e 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -31,7 +31,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -4036,7 +4035,6 @@ func TestCreateInstance(t *testing.T) { machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: client, Cluster: cluster, - ControlPlane: &unstructured.Unstructured{}, Machine: machine, AWSMachine: awsMachine, InfraCluster: clusterScope, diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index df4976ea4e..87cf7e958a 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -26,7 +26,6 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -281,7 +280,6 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) { func getMachineScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachineScope, error) { return scope.NewMachineScope(scope.MachineScopeParams{ Client: client, - ControlPlane: &unstructured.Unstructured{}, Cluster: clusterScope.Cluster, Machine: &clusterv1.Machine{}, InfraCluster: clusterScope, diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index 4e82494848..04afa9e1d4 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -28,7 +28,6 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -274,7 +273,6 @@ func getClusterScope(client client.Client) (*scope.ClusterScope, error) { func getMachineScope(client client.Client, clusterScope *scope.ClusterScope) (*scope.MachineScope, error) { return scope.NewMachineScope(scope.MachineScopeParams{ Client: client, - ControlPlane: &unstructured.Unstructured{}, Cluster: clusterScope.Cluster, Machine: &clusterv1.Machine{}, InfraCluster: clusterScope,