From e5f201c5585f2498f9e8899ebeb41f85870e32c6 Mon Sep 17 00:00:00 2001 From: willie-yao Date: Thu, 16 Feb 2023 21:30:06 +0000 Subject: [PATCH 1/5] Add clusterclass support for managed clusters --- .../azuremanagedclustertemplate_types.go | 56 ++ .../azuremanagedclustertemplate_webhook.go | 78 +++ .../azuremanagedcontrolplane_default.go | 11 + api/v1beta1/azuremanagedcontrolplane_types.go | 5 + .../azuremanagedcontrolplane_webhook.go | 232 ++++--- .../azuremanagedcontrolplane_webhook_test.go | 325 +++++++++- ...zuremanagedcontrolplanetemplate_default.go | 133 ++++ ...anagedcontrolplanetemplate_default_test.go | 424 ++++++++++++ .../azuremanagedcontrolplanetemplate_types.go | 56 ++ ...emanagedcontrolplanetemplate_validation.go | 53 ++ ...zuremanagedcontrolplanetemplate_webhook.go | 107 +++ .../azuremanagedmachinepooltemplate_types.go | 56 ++ api/v1beta1/consts.go | 5 + api/v1beta1/types_template.go | 277 ++++++++ api/v1beta1/zz_generated.deepcopy.go | 576 +++++++++++++++++ ...x-k8s.io_azuremanagedclustertemplates.yaml | 58 ++ ...er.x-k8s.io_azuremanagedcontrolplanes.yaml | 5 + ....io_azuremanagedcontrolplanetemplates.yaml | 557 ++++++++++++++++ ...s.io_azuremanagedmachinepooltemplates.yaml | 612 ++++++++++++++++++ config/crd/kustomization.yaml | 3 + config/webhook/manifests.yaml | 64 ++ controllers/helpers.go | 5 + docs/book/src/topics/clusterclass.md | 88 +++ main.go | 10 + ...ter-template-aks-clusterclass-cluster.yaml | 21 + .../cluster-template-aks-clusterclass.yaml | 155 +++++ ...cluster-template-clusterclass-cluster.yaml | 23 + templates/cluster-template-clusterclass.yaml | 24 - .../aks-clusterclass-cluster/cluster.yaml | 21 + .../kustomization.yaml | 5 + .../azure-managed-cluster-template.yaml | 9 + .../azure-managed-controlplane-template.yaml | 15 + .../azure-managed-machinepool-template.yaml | 23 + .../aks-clusterclass/clusterclass.yaml | 43 ++ .../kubeadm-config-template.yaml | 47 ++ .../aks-clusterclass/kustomization.yaml | 10 + .../managedazurecluster-identity-ref.yaml | 9 + .../cluster.yaml | 0 .../clusterclass-cluster/kustomization.yaml | 3 + .../flavors/clusterclass/kustomization.yaml | 1 - ...luster-template-prow-aks-clusterclass.yaml | 273 ++++++++ .../prow-aks-clusterclass/kustomization.yaml | 13 + .../ci/prow-aks-clusterclass/patches.yaml | 54 ++ .../prow-aks-clusterclass/patches/addons.yaml | 11 + .../patches/aks-clusterclass-pool0.yaml | 15 + .../patches/aks-clusterclass-pool1.yaml | 68 ++ .../patches/cluster.yaml | 7 + .../patches/tags-aks-clusterclass.yaml | 12 + .../test/ci/prow-topology/kustomization.yaml | 2 +- test/e2e/aks_autoscaler.go | 16 +- test/e2e/azure_test.go | 129 ++++ test/e2e/common.go | 3 +- test/e2e/config/azure-dev.yaml | 16 +- 53 files changed, 4678 insertions(+), 146 deletions(-) create mode 100644 api/v1beta1/azuremanagedclustertemplate_types.go create mode 100644 api/v1beta1/azuremanagedclustertemplate_webhook.go create mode 100644 api/v1beta1/azuremanagedcontrolplanetemplate_default.go create mode 100644 api/v1beta1/azuremanagedcontrolplanetemplate_default_test.go create mode 100644 api/v1beta1/azuremanagedcontrolplanetemplate_types.go create mode 100644 api/v1beta1/azuremanagedcontrolplanetemplate_validation.go create mode 100644 api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go create mode 100644 api/v1beta1/azuremanagedmachinepooltemplate_types.go create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml create mode 100644 
config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml create mode 100644 docs/book/src/topics/clusterclass.md create mode 100644 templates/cluster-template-aks-clusterclass-cluster.yaml create mode 100644 templates/cluster-template-aks-clusterclass.yaml create mode 100644 templates/cluster-template-clusterclass-cluster.yaml create mode 100644 templates/flavors/aks-clusterclass-cluster/cluster.yaml create mode 100644 templates/flavors/aks-clusterclass-cluster/kustomization.yaml create mode 100644 templates/flavors/aks-clusterclass/azure-managed-cluster-template.yaml create mode 100644 templates/flavors/aks-clusterclass/azure-managed-controlplane-template.yaml create mode 100644 templates/flavors/aks-clusterclass/azure-managed-machinepool-template.yaml create mode 100644 templates/flavors/aks-clusterclass/clusterclass.yaml create mode 100644 templates/flavors/aks-clusterclass/kubeadm-config-template.yaml create mode 100644 templates/flavors/aks-clusterclass/kustomization.yaml create mode 100644 templates/flavors/aks-clusterclass/patches/managedazurecluster-identity-ref.yaml rename templates/flavors/{clusterclass => clusterclass-cluster}/cluster.yaml (100%) create mode 100644 templates/flavors/clusterclass-cluster/kustomization.yaml create mode 100644 templates/test/ci/cluster-template-prow-aks-clusterclass.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/kustomization.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/patches.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/patches/addons.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool0.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool1.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/patches/cluster.yaml create mode 100644 templates/test/ci/prow-aks-clusterclass/patches/tags-aks-clusterclass.yaml diff --git a/api/v1beta1/azuremanagedclustertemplate_types.go b/api/v1beta1/azuremanagedclustertemplate_types.go new file mode 100644 index 00000000000..84a9dee91ce --- /dev/null +++ b/api/v1beta1/azuremanagedclustertemplate_types.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AzureManagedClusterTemplateSpec defines the desired state of AzureManagedClusterTemplate. +type AzureManagedClusterTemplateSpec struct { + Template AzureManagedClusterTemplateResource `json:"template"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedclustertemplates,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion + +// AzureManagedClusterTemplate is the Schema for the AzureManagedClusterTemplates API. 
+type AzureManagedClusterTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedClusterTemplateSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedClusterTemplateList contains a list of AzureManagedClusterTemplates. +type AzureManagedClusterTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedClusterTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedClusterTemplate{}, &AzureManagedClusterTemplateList{}) +} + +// AzureManagedClusterTemplateResource describes the data needed to create an AzureManagedCluster from a template. +type AzureManagedClusterTemplateResource struct { + Spec AzureManagedClusterTemplateResourceSpec `json:"spec"` +} diff --git a/api/v1beta1/azuremanagedclustertemplate_webhook.go b/api/v1beta1/azuremanagedclustertemplate_webhook.go new file mode 100644 index 00000000000..51b4b8055a3 --- /dev/null +++ b/api/v1beta1/azuremanagedclustertemplate_webhook.go @@ -0,0 +1,78 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/cluster-api-provider-azure/feature" + capifeature "sigs.k8s.io/cluster-api/feature" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// AzureManagedClusterTemplateImmutableMsg is the message used for errors on fields that are immutable. +const AzureManagedClusterTemplateImmutableMsg = "AzureManagedClusterTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/change-clusterclass.html" + +// SetupWebhookWithManager sets up and registers the webhook with the manager. +func (r *AzureManagedClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedclustertemplate,mutating=false,failurePolicy=fail,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedclustertemplates,versions=v1beta1,name=validation.azuremanagedclustertemplates.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +var _ webhook.Validator = &AzureManagedClusterTemplate{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (r *AzureManagedClusterTemplate) ValidateCreate() (admission.Warnings, error) { + // NOTE: AzureManagedClusterTemplate relies upon MachinePools, which is behind a feature gate flag. + // The webhook must prevent creating new objects in case the feature flag is disabled. 
+	if !feature.Gates.Enabled(capifeature.MachinePool) {
+		return nil, field.Forbidden(
+			field.NewPath("spec"),
+			"can be set only if the Cluster API 'MachinePool' feature flag is enabled",
+		)
+	}
+	return nil, nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
+func (r *AzureManagedClusterTemplate) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings, error) {
+	var allErrs field.ErrorList
+	old := oldRaw.(*AzureManagedClusterTemplate)
+	if !reflect.DeepEqual(r.Spec.Template.Spec, old.Spec.Template.Spec) {
+		allErrs = append(allErrs,
+			field.Invalid(field.NewPath("AzureManagedClusterTemplate", "spec", "template", "spec"), r.Spec.Template.Spec, AzureManagedClusterTemplateImmutableMsg),
+		)
+	}
+
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+	return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedClusterTemplate").GroupKind(), r.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+func (r *AzureManagedClusterTemplate) ValidateDelete() (admission.Warnings, error) {
+	return nil, nil
+}
diff --git a/api/v1beta1/azuremanagedcontrolplane_default.go b/api/v1beta1/azuremanagedcontrolplane_default.go
index 9e2940f64fc..26720fbe2ac 100644
--- a/api/v1beta1/azuremanagedcontrolplane_default.go
+++ b/api/v1beta1/azuremanagedcontrolplane_default.go
@@ -23,6 +23,7 @@ import (
 	"golang.org/x/crypto/ssh"
 	"k8s.io/utils/ptr"
 	utilSSH "sigs.k8s.io/cluster-api-provider-azure/util/ssh"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
 const (
@@ -50,6 +51,15 @@ func (m *AzureManagedControlPlane) setDefaultSSHPublicKey() error {
 	return nil
 }
 
+// setDefaultResourceGroupName sets the default ResourceGroupName for an AzureManagedControlPlane.
+func (m *AzureManagedControlPlane) setDefaultResourceGroupName() {
+	if m.Spec.ResourceGroupName == "" {
+		if clusterName, ok := m.Labels[clusterv1.ClusterNameLabel]; ok {
+			m.Spec.ResourceGroupName = clusterName
+		}
+	}
+}
+
 // setDefaultNodeResourceGroupName sets the default NodeResourceGroup for an AzureManagedControlPlane.
 func (m *AzureManagedControlPlane) setDefaultNodeResourceGroupName() {
 	if m.Spec.NodeResourceGroupName == "" {
diff --git a/api/v1beta1/azuremanagedcontrolplane_types.go b/api/v1beta1/azuremanagedcontrolplane_types.go
index 545deffdb8a..2affe9dbf26 100644
--- a/api/v1beta1/azuremanagedcontrolplane_types.go
+++ b/api/v1beta1/azuremanagedcontrolplane_types.go
@@ -368,6 +368,11 @@ type AzureManagedControlPlaneStatus struct {
 	// +optional
 	Ready bool `json:"ready,omitempty"`
 
+	// Total number of non-terminated machines targeted by this control plane
+	// (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas"`
+
 	// Initialized is true when the control plane is available for initial contact.
 	// This may occur before the control plane is fully ready.
 	// In the AzureManagedControlPlane implementation, these are identical.
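The resource-group defaulting added above is not covered by a dedicated unit test in this patch. A minimal sketch of one follows, assuming it lives next to the other defaulting tests in package v1beta1 and reuses the metav1, clusterv1, and gomega imports already used elsewhere in the package; the test name and the "fooCluster"/"my-rg" values are illustrative only, not part of this change.

func TestSetDefaultResourceGroupName(t *testing.T) {
	g := NewWithT(t)

	// The cluster-name label is what the defaulting falls back to when
	// spec.resourceGroupName is left empty.
	amcp := &AzureManagedControlPlane{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "fooCluster",
			Labels: map[string]string{clusterv1.ClusterNameLabel: "fooCluster"},
		},
	}
	amcp.setDefaultResourceGroupName()
	g.Expect(amcp.Spec.ResourceGroupName).To(Equal("fooCluster"))

	// An explicitly set resource group must be left untouched.
	amcp.Spec.ResourceGroupName = "my-rg"
	amcp.setDefaultResourceGroupName()
	g.Expect(amcp.Spec.ResourceGroupName).To(Equal("my-rg"))
}

Defaulting from the cluster-name label is presumably what allows ClusterClass to work here: a control plane stamped out from an AzureManagedControlPlaneTemplate cannot carry a per-cluster resource group name up front, so it is derived from the owning Cluster instead.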
diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook.go b/api/v1beta1/azuremanagedcontrolplane_webhook.go index 5637c32e3a3..2da8ad01bac 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook.go @@ -18,7 +18,6 @@ package v1beta1 import ( "context" - "errors" "fmt" "net" "reflect" @@ -73,7 +72,7 @@ func (mw *azureManagedControlPlaneWebhook) Default(ctx context.Context, obj runt return apierrors.NewBadRequest("expected an AzureManagedControlPlane") } if m.Spec.NetworkPlugin == nil { - networkPlugin := "azure" + networkPlugin := CloudProviderName m.Spec.NetworkPlugin = &networkPlugin } if m.Spec.LoadBalancerSKU == nil { @@ -102,6 +101,7 @@ func (mw *azureManagedControlPlaneWebhook) Default(ctx context.Context, obj runt ctrl.Log.WithName("AzureManagedControlPlaneWebHookLogger").Info("Paid SKU tier is deprecated and has been replaced by Standard") } + m.setDefaultResourceGroupName() m.setDefaultNodeResourceGroupName() m.setDefaultVirtualNetwork() m.setDefaultSubnet() @@ -274,28 +274,58 @@ func (mw *azureManagedControlPlaneWebhook) ValidateDelete(ctx context.Context, o // Validate the Azure Managed Control Plane and return an aggregate error. func (m *AzureManagedControlPlane) Validate(cli client.Client) error { + var allErrs field.ErrorList validators := []func(client client.Client) error{ - m.validateName, - m.validateVersion, m.validateSSHKey, - m.validateLoadBalancerProfile, m.validateAPIServerAccessProfile, - m.validateManagedClusterNetwork, - m.validateAutoScalerProfile, m.validateIdentity, m.validateNetworkPluginMode, m.validateDNSPrefix, m.validateDisableLocalAccounts, } - - var errs []error for _, validator := range validators { if err := validator(cli); err != nil { - errs = append(errs, err) + allErrs = append(allErrs, field.InternalError(field.NewPath("spec"), err)) } } - return kerrors.NewAggregate(errs) + allErrs = append(allErrs, validateDNSServiceIP( + m.Spec.DNSServiceIP, + field.NewPath("spec").Child("DNSServiceIP"))...) + + allErrs = append(allErrs, validateVersion( + m.Spec.Version, + field.NewPath("spec").Child("Version"))...) + + allErrs = append(allErrs, validateLoadBalancerProfile( + m.Spec.LoadBalancerProfile, + field.NewPath("spec").Child("LoadBalancerProfile"))...) + + allErrs = append(allErrs, validateManagedClusterNetwork( + cli, + m.Labels, + m.Namespace, + m.Spec.DNSServiceIP, + m.Spec.VirtualNetwork.Subnet, + field.NewPath("spec").Child("spec"))...) + + allErrs = append(allErrs, validateName(m.Name, field.NewPath("Name"))...) + + allErrs = append(allErrs, validateAutoScalerProfile(m.Spec.AutoScalerProfile, field.NewPath("spec").Child("AutoScalerProfile"))...) + + return allErrs.ToAggregate() +} + +// validateDNSServiceIP validates the DNSServiceIP. +func validateDNSServiceIP(dnsServiceIP *string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if dnsServiceIP != nil { + if net.ParseIP(*dnsServiceIP) == nil { + allErrs = append(allErrs, field.Invalid(fldPath, dnsServiceIP, "DNSServiceIP must be a valid IP")) + } + } + + return allErrs } func (m *AzureManagedControlPlane) validateDNSPrefix(_ client.Client) error { @@ -327,12 +357,13 @@ func (m *AzureManagedControlPlane) validateDisableLocalAccounts(_ client.Client) } // validateVersion validates the Kubernetes version. 
-func (m *AzureManagedControlPlane) validateVersion(_ client.Client) error { - if !kubeSemver.MatchString(m.Spec.Version) { - return errors.New("must be a valid semantic version") +func validateVersion(version string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if !kubeSemver.MatchString(version) { + allErrs = append(allErrs, field.Invalid(fldPath, version, "must be a valid semantic version")) } - return nil + return allErrs } // validateSSHKey validates an SSHKey. @@ -347,52 +378,44 @@ func (m *AzureManagedControlPlane) validateSSHKey(_ client.Client) error { } // validateLoadBalancerProfile validates a LoadBalancerProfile. -func (m *AzureManagedControlPlane) validateLoadBalancerProfile(_ client.Client) error { - if m.Spec.LoadBalancerProfile != nil { - var errs []error - var allErrs field.ErrorList +func validateLoadBalancerProfile(loadBalancerProfile *LoadBalancerProfile, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if loadBalancerProfile != nil { numOutboundIPTypes := 0 - if m.Spec.LoadBalancerProfile.ManagedOutboundIPs != nil { - if *m.Spec.LoadBalancerProfile.ManagedOutboundIPs < 1 || *m.Spec.LoadBalancerProfile.ManagedOutboundIPs > 100 { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "LoadBalancerProfile", "ManagedOutboundIPs"), *m.Spec.LoadBalancerProfile.ManagedOutboundIPs, "value should be in between 1 and 100")) + if loadBalancerProfile.ManagedOutboundIPs != nil { + if *loadBalancerProfile.ManagedOutboundIPs < 1 || *loadBalancerProfile.ManagedOutboundIPs > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ManagedOutboundIPs"), *loadBalancerProfile.ManagedOutboundIPs, "value should be in between 1 and 100")) } } - if m.Spec.LoadBalancerProfile.AllocatedOutboundPorts != nil { - if *m.Spec.LoadBalancerProfile.AllocatedOutboundPorts < 0 || *m.Spec.LoadBalancerProfile.AllocatedOutboundPorts > 64000 { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "LoadBalancerProfile", "AllocatedOutboundPorts"), *m.Spec.LoadBalancerProfile.AllocatedOutboundPorts, "value should be in between 0 and 64000")) + if loadBalancerProfile.AllocatedOutboundPorts != nil { + if *loadBalancerProfile.AllocatedOutboundPorts < 0 || *loadBalancerProfile.AllocatedOutboundPorts > 64000 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AllocatedOutboundPorts"), *loadBalancerProfile.AllocatedOutboundPorts, "value should be in between 0 and 64000")) } } - if m.Spec.LoadBalancerProfile.IdleTimeoutInMinutes != nil { - if *m.Spec.LoadBalancerProfile.IdleTimeoutInMinutes < 4 || *m.Spec.LoadBalancerProfile.IdleTimeoutInMinutes > 120 { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "LoadBalancerProfile", "IdleTimeoutInMinutes"), *m.Spec.LoadBalancerProfile.IdleTimeoutInMinutes, "value should be in between 4 and 120")) + if loadBalancerProfile.IdleTimeoutInMinutes != nil { + if *loadBalancerProfile.IdleTimeoutInMinutes < 4 || *loadBalancerProfile.IdleTimeoutInMinutes > 120 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("IdleTimeoutInMinutes"), *loadBalancerProfile.IdleTimeoutInMinutes, "value should be in between 4 and 120")) } } - if m.Spec.LoadBalancerProfile.ManagedOutboundIPs != nil { + if loadBalancerProfile.ManagedOutboundIPs != nil { numOutboundIPTypes++ } - if len(m.Spec.LoadBalancerProfile.OutboundIPPrefixes) > 0 { + if len(loadBalancerProfile.OutboundIPPrefixes) > 0 { numOutboundIPTypes++ } - if len(m.Spec.LoadBalancerProfile.OutboundIPs) > 0 { + if 
len(loadBalancerProfile.OutboundIPs) > 0 { numOutboundIPTypes++ } if numOutboundIPTypes > 1 { - errs = append(errs, errors.New("load balancer profile must specify at most one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs")) - } - - if len(allErrs) > 0 { - agg := kerrors.NewAggregate(allErrs.ToAggregate().Errors()) - errs = append(errs, agg) + allErrs = append(allErrs, field.Forbidden(fldPath, "load balancer profile must specify at most one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs")) } - - return kerrors.NewAggregate(errs) } - return nil + return allErrs } // validateAPIServerAccessProfile validates an APIServerAccessProfile. @@ -412,30 +435,31 @@ func (m *AzureManagedControlPlane) validateAPIServerAccessProfile(_ client.Clien } // validateManagedClusterNetwork validates the Cluster network values. -func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Client) error { +func validateManagedClusterNetwork(cli client.Client, labels map[string]string, namespace string, dnsServiceIP *string, subnet ManagedControlPlaneSubnet, fldPath *field.Path) field.ErrorList { + var ( + allErrs field.ErrorList + serviceCIDR string + ) + ctx := context.Background() // Fetch the Cluster. - clusterName, ok := m.Labels[clusterv1.ClusterNameLabel] + clusterName, ok := labels[clusterv1.ClusterNameLabel] if !ok { return nil } ownerCluster := &clusterv1.Cluster{} key := client.ObjectKey{ - Namespace: m.Namespace, + Namespace: namespace, Name: clusterName, } if err := cli.Get(ctx, key, ownerCluster); err != nil { - return err + allErrs = append(allErrs, field.InternalError(fldPath, err)) + return allErrs } - var ( - allErrs field.ErrorList - serviceCIDR string - ) - if clusterNetwork := ownerCluster.Spec.ClusterNetwork; clusterNetwork != nil { if clusterNetwork.Services != nil { // A user may provide zero or one CIDR blocks. 
If they provide an empty array, @@ -456,7 +480,7 @@ func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Clie } } - if m.Spec.DNSServiceIP != nil { + if dnsServiceIP != nil { if serviceCIDR == "" { allErrs = append(allErrs, field.Required(field.NewPath("Cluster", "Spec", "ClusterNetwork", "Services", "CIDRBlocks"), "service CIDR must be specified if specifying DNSServiceIP")) } @@ -465,9 +489,9 @@ func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Clie allErrs = append(allErrs, field.Invalid(field.NewPath("Cluster", "Spec", "ClusterNetwork", "Services", "CIDRBlocks"), serviceCIDR, fmt.Sprintf("failed to parse cluster service cidr: %v", err))) } - dnsIP := net.ParseIP(*m.Spec.DNSServiceIP) + dnsIP := net.ParseIP(*dnsServiceIP) if dnsIP == nil { // dnsIP will be nil if the string is not a valid IP - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "DNSServiceIP"), *m.Spec.DNSServiceIP, "must be a valid IP address")) + allErrs = append(allErrs, field.Invalid(field.NewPath("Cluster", "Spec", "ClusterNetwork", "Services", "DNSServiceIP"), *dnsServiceIP, "must be a valid IP address")) } if dnsIP != nil && !cidr.Contains(dnsIP) { @@ -478,18 +502,15 @@ func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Clie // Refer to: https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#create-an-aks-cluster-with-system-assigned-managed-identities targetSuffix := ".10" if dnsIP != nil && !strings.HasSuffix(dnsIP.String(), targetSuffix) { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "DNSServiceIP"), *m.Spec.DNSServiceIP, fmt.Sprintf("must end with %q", targetSuffix))) + allErrs = append(allErrs, field.Invalid(field.NewPath("Cluster", "Spec", "ClusterNetwork", "Services", "DNSServiceIP"), *dnsServiceIP, fmt.Sprintf("must end with %q", targetSuffix))) } } - if errs := validatePrivateEndpoints(m.Spec.VirtualNetwork.Subnet.PrivateEndpoints, []string{m.Spec.VirtualNetwork.Subnet.CIDRBlock}, field.NewPath("Spec", "VirtualNetwork.Subnet.PrivateEndpoints")); len(errs) > 0 { + if errs := validatePrivateEndpoints(subnet.PrivateEndpoints, []string{subnet.CIDRBlock}, fldPath.Child("VirtualNetwork.Subnet.PrivateEndpoints")); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if len(allErrs) > 0 { - return kerrors.NewAggregate(allErrs.ToAggregate().Errors()) - } - return nil + return allErrs } // validateAPIServerAccessProfileUpdate validates update to APIServerAccessProfile. @@ -656,153 +677,150 @@ func (m *AzureManagedControlPlane) validateOIDCIssuerProfileUpdate(old *AzureMan return allErrs } -func (m *AzureManagedControlPlane) validateName(_ client.Client) error { - if lName := strings.ToLower(m.Name); strings.Contains(lName, "microsoft") || +func validateName(name string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if lName := strings.ToLower(name); strings.Contains(lName, "microsoft") || strings.Contains(lName, "windows") { - return field.Invalid(field.NewPath("Name"), m.Name, - "cluster name is invalid because 'MICROSOFT' and 'WINDOWS' can't be used as either a whole word or a substring in the name") + allErrs = append(allErrs, field.Invalid(fldPath.Child("Name"), name, + "cluster name is invalid because 'MICROSOFT' and 'WINDOWS' can't be used as either a whole word or a substring in the name")) } - return nil + return allErrs } // validateAutoScalerProfile validates an AutoScalerProfile. 
-func (m *AzureManagedControlPlane) validateAutoScalerProfile(_ client.Client) error { +func validateAutoScalerProfile(autoScalerProfile *AutoScalerProfile, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - if m.Spec.AutoScalerProfile == nil { + if autoScalerProfile == nil { return nil } - if errs := m.validateIntegerStringGreaterThanZero(m.Spec.AutoScalerProfile.MaxEmptyBulkDelete, "MaxEmptyBulkDelete"); len(errs) > 0 { + if errs := validateIntegerStringGreaterThanZero(autoScalerProfile.MaxEmptyBulkDelete, fldPath, "MaxEmptyBulkDelete"); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateIntegerStringGreaterThanZero(m.Spec.AutoScalerProfile.MaxGracefulTerminationSec, "MaxGracefulTerminationSec"); len(errs) > 0 { + if errs := validateIntegerStringGreaterThanZero(autoScalerProfile.MaxGracefulTerminationSec, fldPath, "MaxGracefulTerminationSec"); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateMaxNodeProvisionTime(); len(errs) > 0 { + if errs := validateMaxNodeProvisionTime(autoScalerProfile.MaxNodeProvisionTime, fldPath); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if m.Spec.AutoScalerProfile.MaxTotalUnreadyPercentage != nil { - val, err := strconv.Atoi(*m.Spec.AutoScalerProfile.MaxTotalUnreadyPercentage) + if autoScalerProfile.MaxTotalUnreadyPercentage != nil { + val, err := strconv.Atoi(*autoScalerProfile.MaxTotalUnreadyPercentage) if err != nil || val < 0 || val > 100 { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "MaxTotalUnreadyPercentage"), m.Spec.AutoScalerProfile.MaxTotalUnreadyPercentage, "invalid value")) + allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "MaxTotalUnreadyPercentage"), autoScalerProfile.MaxTotalUnreadyPercentage, "invalid value")) } } - if errs := m.validateNewPodScaleUpDelay(); len(errs) > 0 { + if errs := validateNewPodScaleUpDelay(autoScalerProfile.NewPodScaleUpDelay, fldPath); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateIntegerStringGreaterThanZero(m.Spec.AutoScalerProfile.OkTotalUnreadyCount, "OkTotalUnreadyCount"); len(errs) > 0 { + if errs := validateIntegerStringGreaterThanZero(autoScalerProfile.OkTotalUnreadyCount, fldPath, "OkTotalUnreadyCount"); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateScanInterval(); len(errs) > 0 { + if errs := validateScanInterval(autoScalerProfile.ScanInterval, fldPath); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateScaleDownTime(m.Spec.AutoScalerProfile.ScaleDownDelayAfterAdd, "ScaleDownDelayAfterAdd"); len(errs) > 0 { + if errs := validateScaleDownTime(autoScalerProfile.ScaleDownDelayAfterAdd, fldPath, "ScaleDownDelayAfterAdd"); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateScaleDownDelayAfterDelete(); len(errs) > 0 { + if errs := validateScaleDownDelayAfterDelete(autoScalerProfile.ScaleDownDelayAfterDelete, fldPath); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateScaleDownTime(m.Spec.AutoScalerProfile.ScaleDownDelayAfterFailure, "ScaleDownDelayAfterFailure"); len(errs) > 0 { + if errs := validateScaleDownTime(autoScalerProfile.ScaleDownDelayAfterFailure, fldPath, "ScaleDownDelayAfterFailure"); len(errs) > 0 { allErrs = append(allErrs, errs...) 
} - if errs := m.validateScaleDownTime(m.Spec.AutoScalerProfile.ScaleDownUnneededTime, "ScaleDownUnneededTime"); len(errs) > 0 { + if errs := validateScaleDownTime(autoScalerProfile.ScaleDownUnneededTime, fldPath, "ScaleDownUnneededTime"); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if errs := m.validateScaleDownTime(m.Spec.AutoScalerProfile.ScaleDownUnreadyTime, "ScaleDownUnreadyTime"); len(errs) > 0 { + if errs := validateScaleDownTime(autoScalerProfile.ScaleDownUnreadyTime, fldPath, "ScaleDownUnreadyTime"); len(errs) > 0 { allErrs = append(allErrs, errs...) } - if m.Spec.AutoScalerProfile.ScaleDownUtilizationThreshold != nil { - val, err := strconv.ParseFloat(*m.Spec.AutoScalerProfile.ScaleDownUtilizationThreshold, 32) + if autoScalerProfile.ScaleDownUtilizationThreshold != nil { + val, err := strconv.ParseFloat(*autoScalerProfile.ScaleDownUtilizationThreshold, 32) if err != nil || val < 0 || val > 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "ScaleDownUtilizationThreshold"), m.Spec.AutoScalerProfile.ScaleDownUtilizationThreshold, "invalid value")) + allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "ScaleDownUtilizationThreshold"), autoScalerProfile.ScaleDownUtilizationThreshold, "invalid value")) } } - if len(allErrs) > 0 { - return kerrors.NewAggregate(allErrs.ToAggregate().Errors()) - } - - return nil + return allErrs } // validateMaxNodeProvisionTime validates update to AutoscalerProfile.MaxNodeProvisionTime. -func (m *AzureManagedControlPlane) validateMaxNodeProvisionTime() field.ErrorList { +func validateMaxNodeProvisionTime(maxNodeProvisionTime *string, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - if ptr.Deref(m.Spec.AutoScalerProfile.MaxNodeProvisionTime, "") != "" { - if !rMaxNodeProvisionTime.MatchString(ptr.Deref(m.Spec.AutoScalerProfile.MaxNodeProvisionTime, "")) { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "MaxNodeProvisionTime"), m.Spec.AutoScalerProfile.MaxNodeProvisionTime, "invalid value")) + if ptr.Deref(maxNodeProvisionTime, "") != "" { + if !rMaxNodeProvisionTime.MatchString(ptr.Deref(maxNodeProvisionTime, "")) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("MaxNodeProvisionTime"), maxNodeProvisionTime, "invalid value")) } } return allErrs } // validateScanInterval validates update to AutoscalerProfile.ScanInterval. -func (m *AzureManagedControlPlane) validateScanInterval() field.ErrorList { +func validateScanInterval(scanInterval *string, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - if ptr.Deref(m.Spec.AutoScalerProfile.ScanInterval, "") != "" { - if !rScanInterval.MatchString(ptr.Deref(m.Spec.AutoScalerProfile.ScanInterval, "")) { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "ScanInterval"), m.Spec.AutoScalerProfile.ScanInterval, "invalid value")) + if ptr.Deref(scanInterval, "") != "" { + if !rScanInterval.MatchString(ptr.Deref(scanInterval, "")) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ScanInterval"), scanInterval, "invalid value")) } } return allErrs } // validateNewPodScaleUpDelay validates update to AutoscalerProfile.NewPodScaleUpDelay. 
-func (m *AzureManagedControlPlane) validateNewPodScaleUpDelay() field.ErrorList { +func validateNewPodScaleUpDelay(newPodScaleUpDelay *string, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - if ptr.Deref(m.Spec.AutoScalerProfile.NewPodScaleUpDelay, "") != "" { - _, err := time.ParseDuration(ptr.Deref(m.Spec.AutoScalerProfile.NewPodScaleUpDelay, "")) + if ptr.Deref(newPodScaleUpDelay, "") != "" { + _, err := time.ParseDuration(ptr.Deref(newPodScaleUpDelay, "")) if err != nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "NewPodScaleUpDelay"), m.Spec.AutoScalerProfile.NewPodScaleUpDelay, "invalid value")) + allErrs = append(allErrs, field.Invalid(fldPath.Child("NewPodScaleUpDelay"), newPodScaleUpDelay, "invalid value")) } } return allErrs } // validateScaleDownDelayAfterDelete validates update to AutoscalerProfile.ScaleDownDelayAfterDelete value. -func (m *AzureManagedControlPlane) validateScaleDownDelayAfterDelete() field.ErrorList { +func validateScaleDownDelayAfterDelete(scaleDownDelayAfterDelete *string, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList - if ptr.Deref(m.Spec.AutoScalerProfile.ScaleDownDelayAfterDelete, "") != "" { - if !rScaleDownDelayAfterDelete.MatchString(ptr.Deref(m.Spec.AutoScalerProfile.ScaleDownDelayAfterDelete, "")) { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", "ScaleDownDelayAfterDelete"), ptr.Deref(m.Spec.AutoScalerProfile.ScaleDownDelayAfterDelete, ""), "invalid value")) + if ptr.Deref(scaleDownDelayAfterDelete, "") != "" { + if !rScaleDownDelayAfterDelete.MatchString(ptr.Deref(scaleDownDelayAfterDelete, "")) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ScaleDownDelayAfterDelete"), ptr.Deref(scaleDownDelayAfterDelete, ""), "invalid value")) } } return allErrs } // validateScaleDownTime validates update to AutoscalerProfile.ScaleDown* values. -func (m *AzureManagedControlPlane) validateScaleDownTime(scaleDownValue *string, fieldName string) field.ErrorList { +func validateScaleDownTime(scaleDownValue *string, fldPath *field.Path, fieldName string) field.ErrorList { var allErrs field.ErrorList if ptr.Deref(scaleDownValue, "") != "" { if !rScaleDownTime.MatchString(ptr.Deref(scaleDownValue, "")) { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", fieldName), ptr.Deref(scaleDownValue, ""), "invalid value")) + allErrs = append(allErrs, field.Invalid(fldPath.Child(fieldName), ptr.Deref(scaleDownValue, ""), "invalid value")) } } return allErrs } // validateIntegerStringGreaterThanZero validates that a string value is an integer greater than zero. -func (m *AzureManagedControlPlane) validateIntegerStringGreaterThanZero(input *string, fieldName string) field.ErrorList { +func validateIntegerStringGreaterThanZero(input *string, fldPath *field.Path, fieldName string) field.ErrorList { var allErrs field.ErrorList if input != nil { val, err := strconv.Atoi(*input) if err != nil || val < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "AutoscalerProfile", fieldName), input, "invalid value")) + allErrs = append(allErrs, field.Invalid(fldPath.Child(fieldName), input, "invalid value")) } } diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go index 904780e3e3d..2aebcaa9705 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go @@ -22,6 +22,7 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api-provider-azure/feature" @@ -113,6 +114,326 @@ func TestDefaultingWebhook(t *testing.T) { g.Expect(amcp.Spec.VirtualNetwork.Subnet.CIDRBlock).To(Equal(defaultAKSNodeSubnetCIDRForOverlay)) } +func TestValidateDNSServiceIP(t *testing.T) { + g := NewWithT(t) + tests := []struct { + name string + dnsIP *string + expectErr bool + }{ + { + name: "Testing valid DNSServiceIP", + dnsIP: ptr.To("192.168.0.0"), + expectErr: false, + }, + { + name: "Testing invalid DNSServiceIP", + dnsIP: ptr.To("192.168.0.0.3"), + expectErr: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + allErrs := validateDNSServiceIP(tt.dnsIP, field.NewPath("spec").Child("DNSServiceIP")) + if tt.expectErr { + g.Expect(allErrs).NotTo(BeNil()) + } else { + g.Expect(allErrs).To(BeNil()) + } + }) + } +} + +func TestValidateVersion(t *testing.T) { + g := NewWithT(t) + tests := []struct { + name string + version string + expectErr bool + }{ + { + name: "Invalid Version", + version: "honk", + expectErr: true, + }, + { + name: "not following the Kubernetes Version pattern", + version: "1.19.0", + expectErr: true, + }, + { + name: "Version not set", + version: "", + expectErr: true, + }, + { + name: "Valid Version", + version: "v1.17.8", + expectErr: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + allErrs := validateVersion(tt.version, field.NewPath("spec").Child("Version")) + if tt.expectErr { + g.Expect(allErrs).NotTo(BeNil()) + } else { + g.Expect(allErrs).To(BeNil()) + } + }) + } +} + +func TestValidateLoadBalancerProfile(t *testing.T) { + g := NewWithT(t) + tests := []struct { + name string + profile *LoadBalancerProfile + expectErr bool + }{ + { + name: "Valid LoadBalancerProfile", + profile: &LoadBalancerProfile{ + ManagedOutboundIPs: ptr.To[int32](10), + AllocatedOutboundPorts: ptr.To[int32](1000), + IdleTimeoutInMinutes: ptr.To[int32](60), + }, + expectErr: false, + }, + { + name: "Invalid LoadBalancerProfile.ManagedOutboundIPs", + profile: &LoadBalancerProfile{ + ManagedOutboundIPs: ptr.To[int32](200), + }, + expectErr: true, + }, + { + name: "Invalid LoadBalancerProfile.IdleTimeoutInMinutes", + profile: &LoadBalancerProfile{ + IdleTimeoutInMinutes: ptr.To[int32](600), + }, + expectErr: true, + }, + { + name: "LoadBalancerProfile must specify at most one of ManagedOutboundIPs, OutboundIPPrefixes and OutboundIPs", + profile: &LoadBalancerProfile{ + ManagedOutboundIPs: ptr.To[int32](1), + OutboundIPs: []string{ + "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/foo-bar/providers/Microsoft.Network/publicIPAddresses/my-public-ip", + }, + }, + expectErr: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + allErrs := validateLoadBalancerProfile(tt.profile, field.NewPath("spec").Child("LoadBalancerProfile")) + if tt.expectErr { + g.Expect(allErrs).NotTo(BeNil()) + } else { + g.Expect(allErrs).To(BeNil()) + } + }) + } +} + +func TestValidateAutoScalerProfile(t *testing.T) { + g := NewWithT(t) + tests := []struct { + name string + profile *AutoScalerProfile + expectErr bool + }{ + { + name: "Valid AutoScalerProfile", + profile: &AutoScalerProfile{ + BalanceSimilarNodeGroups: (*BalanceSimilarNodeGroups)(ptr.To(string(BalanceSimilarNodeGroupsFalse))), + Expander: 
(*Expander)(ptr.To(string(ExpanderRandom))), + MaxEmptyBulkDelete: ptr.To("10"), + MaxGracefulTerminationSec: ptr.To("600"), + MaxNodeProvisionTime: ptr.To("10m"), + MaxTotalUnreadyPercentage: ptr.To("45"), + NewPodScaleUpDelay: ptr.To("10m"), + OkTotalUnreadyCount: ptr.To("3"), + ScanInterval: ptr.To("60s"), + ScaleDownDelayAfterAdd: ptr.To("10m"), + ScaleDownDelayAfterDelete: ptr.To("10s"), + ScaleDownDelayAfterFailure: ptr.To("10m"), + ScaleDownUnneededTime: ptr.To("10m"), + ScaleDownUnreadyTime: ptr.To("10m"), + ScaleDownUtilizationThreshold: ptr.To("0.5"), + SkipNodesWithLocalStorage: (*SkipNodesWithLocalStorage)(ptr.To(string(SkipNodesWithLocalStorageTrue))), + SkipNodesWithSystemPods: (*SkipNodesWithSystemPods)(ptr.To(string(SkipNodesWithSystemPodsTrue))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.ExpanderRandom", + profile: &AutoScalerProfile{ + Expander: (*Expander)(ptr.To(string(ExpanderRandom))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.ExpanderLeastWaste", + profile: &AutoScalerProfile{ + Expander: (*Expander)(ptr.To(string(ExpanderLeastWaste))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.ExpanderMostPods", + profile: &AutoScalerProfile{ + Expander: (*Expander)(ptr.To(string(ExpanderMostPods))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.ExpanderPriority", + profile: &AutoScalerProfile{ + Expander: (*Expander)(ptr.To(string(ExpanderPriority))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.BalanceSimilarNodeGroupsTrue", + profile: &AutoScalerProfile{ + BalanceSimilarNodeGroups: (*BalanceSimilarNodeGroups)(ptr.To(string(BalanceSimilarNodeGroupsTrue))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.BalanceSimilarNodeGroupsFalse", + profile: &AutoScalerProfile{ + BalanceSimilarNodeGroups: (*BalanceSimilarNodeGroups)(ptr.To(string(BalanceSimilarNodeGroupsFalse))), + }, + expectErr: false, + }, + { + name: "Testing invalid AutoScalerProfile.MaxEmptyBulkDelete", + profile: &AutoScalerProfile{ + MaxEmptyBulkDelete: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.MaxGracefulTerminationSec", + profile: &AutoScalerProfile{ + MaxGracefulTerminationSec: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.MaxNodeProvisionTime", + profile: &AutoScalerProfile{ + MaxNodeProvisionTime: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.MaxTotalUnreadyPercentage", + profile: &AutoScalerProfile{ + MaxTotalUnreadyPercentage: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.NewPodScaleUpDelay", + profile: &AutoScalerProfile{ + NewPodScaleUpDelay: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.OkTotalUnreadyCount", + profile: &AutoScalerProfile{ + OkTotalUnreadyCount: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScanInterval", + profile: &AutoScalerProfile{ + ScanInterval: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScaleDownDelayAfterAdd", + profile: &AutoScalerProfile{ + ScaleDownDelayAfterAdd: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScaleDownDelayAfterDelete", + profile: &AutoScalerProfile{ + ScaleDownDelayAfterDelete: 
ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScaleDownDelayAfterFailure", + profile: &AutoScalerProfile{ + ScaleDownDelayAfterFailure: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScaleDownUnneededTime", + profile: &AutoScalerProfile{ + ScaleDownUnneededTime: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScaleDownUnreadyTime", + profile: &AutoScalerProfile{ + ScaleDownUnreadyTime: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing invalid AutoScalerProfile.ScaleDownUtilizationThreshold", + profile: &AutoScalerProfile{ + ScaleDownUtilizationThreshold: ptr.To("invalid"), + }, + expectErr: true, + }, + { + name: "Testing valid AutoScalerProfile.SkipNodesWithLocalStorageTrue", + profile: &AutoScalerProfile{ + SkipNodesWithLocalStorage: (*SkipNodesWithLocalStorage)(ptr.To(string(SkipNodesWithLocalStorageTrue))), + }, + expectErr: false, + }, + { + name: "Testing valid AutoScalerProfile.SkipNodesWithLocalStorageFalse", + profile: &AutoScalerProfile{ + SkipNodesWithSystemPods: (*SkipNodesWithSystemPods)(ptr.To(string(SkipNodesWithSystemPodsFalse))), + }, + expectErr: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + allErrs := validateAutoScalerProfile(tt.profile, field.NewPath("spec").Child("AutoScalerProfile")) + if tt.expectErr { + g.Expect(allErrs).NotTo(BeNil()) + } else { + g.Expect(allErrs).To(BeNil()) + } + }) + } +} + func TestValidatingWebhook(t *testing.T) { // NOTE: AzureManageControlPlane is behind AKS feature gate flag; the webhook // must prevent creating new objects in case the feature flag is disabled. @@ -756,7 +1077,7 @@ func TestAzureManagedControlPlane_ValidateCreate(t *testing.T) { name: "invalid DNSServiceIP", amcp: createAzureManagedControlPlane("192.168.0.10.3", "v1.18.0", generateSSHPublicKey(true)), wantErr: true, - errorLen: 1, + errorLen: 2, }, { name: "invalid DNSServiceIP", @@ -774,7 +1095,7 @@ func TestAzureManagedControlPlane_ValidateCreate(t *testing.T) { name: "invalid sshKey with a simple text and invalid DNSServiceIP", amcp: createAzureManagedControlPlane("192.168.0.10.3", "v1.18.0", "invalid_sshkey_honk"), wantErr: true, - errorLen: 2, + errorLen: 3, }, { name: "invalid version", diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_default.go b/api/v1beta1/azuremanagedcontrolplanetemplate_default.go new file mode 100644 index 00000000000..1173fbe333d --- /dev/null +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_default.go @@ -0,0 +1,133 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "strings" + + "k8s.io/utils/ptr" +) + +func (mcp *AzureManagedControlPlaneTemplate) setDefaults() { + if mcp.Spec.Template.Spec.NetworkPlugin == nil { + networkPlugin := CloudProviderName + mcp.Spec.Template.Spec.NetworkPlugin = &networkPlugin + } + if mcp.Spec.Template.Spec.LoadBalancerSKU == nil { + loadBalancerSKU := "Standard" + mcp.Spec.Template.Spec.LoadBalancerSKU = &loadBalancerSKU + } + + if mcp.Spec.Template.Spec.Version != "" && !strings.HasPrefix(mcp.Spec.Template.Spec.Version, "v") { + normalizedVersion := "v" + mcp.Spec.Template.Spec.Version + mcp.Spec.Template.Spec.Version = normalizedVersion + } + + mcp.setDefaultVirtualNetwork() + mcp.setDefaultSubnet() + mcp.setDefaultSku() + mcp.setDefaultAutoScalerProfile() +} + +// setDefaultVirtualNetwork sets the default VirtualNetwork for an AzureManagedControlPlaneTemplate. +func (mcp *AzureManagedControlPlaneTemplate) setDefaultVirtualNetwork() { + if mcp.Spec.Template.Spec.VirtualNetwork.Name == "" { + mcp.Spec.Template.Spec.VirtualNetwork.Name = mcp.Name + } + if mcp.Spec.Template.Spec.VirtualNetwork.CIDRBlock == "" { + mcp.Spec.Template.Spec.VirtualNetwork.CIDRBlock = defaultAKSVnetCIDR + } +} + +// setDefaultSubnet sets the default Subnet for an AzureManagedControlPlaneTemplate. +func (mcp *AzureManagedControlPlaneTemplate) setDefaultSubnet() { + if mcp.Spec.Template.Spec.VirtualNetwork.Subnet.Name == "" { + mcp.Spec.Template.Spec.VirtualNetwork.Subnet.Name = mcp.Name + } + if mcp.Spec.Template.Spec.VirtualNetwork.Subnet.CIDRBlock == "" { + mcp.Spec.Template.Spec.VirtualNetwork.Subnet.CIDRBlock = defaultAKSNodeSubnetCIDR + } +} + +func (mcp *AzureManagedControlPlaneTemplate) setDefaultSku() { + if mcp.Spec.Template.Spec.SKU == nil { + mcp.Spec.Template.Spec.SKU = &AKSSku{ + Tier: FreeManagedControlPlaneTier, + } + } +} + +func (mcp *AzureManagedControlPlaneTemplate) setDefaultAutoScalerProfile() { + if mcp.Spec.Template.Spec.AutoScalerProfile == nil { + return + } + + // Default values are from https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler#using-the-autoscaler-profile + // If any values are set, they all need to be set. 
+ if mcp.Spec.Template.Spec.AutoScalerProfile.BalanceSimilarNodeGroups == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.BalanceSimilarNodeGroups = (*BalanceSimilarNodeGroups)(ptr.To(string(BalanceSimilarNodeGroupsFalse))) + } + if mcp.Spec.Template.Spec.AutoScalerProfile.Expander == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.Expander = (*Expander)(ptr.To(string(ExpanderRandom))) + } + if mcp.Spec.Template.Spec.AutoScalerProfile.MaxEmptyBulkDelete == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.MaxEmptyBulkDelete = ptr.To("10") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.MaxGracefulTerminationSec == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.MaxGracefulTerminationSec = ptr.To("600") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.MaxNodeProvisionTime == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.MaxNodeProvisionTime = ptr.To("15m") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.MaxTotalUnreadyPercentage == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.MaxTotalUnreadyPercentage = ptr.To("45") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.NewPodScaleUpDelay == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.NewPodScaleUpDelay = ptr.To("0s") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.OkTotalUnreadyCount == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.OkTotalUnreadyCount = ptr.To("3") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScanInterval == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.ScanInterval = ptr.To("10s") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownDelayAfterAdd == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownDelayAfterAdd = ptr.To("10m") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownDelayAfterDelete == nil { + // Default is the same as the ScanInterval so default to that same value if it isn't set + mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownDelayAfterDelete = mcp.Spec.Template.Spec.AutoScalerProfile.ScanInterval + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownDelayAfterFailure == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownDelayAfterFailure = ptr.To("3m") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownUnneededTime == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownUnneededTime = ptr.To("10m") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownUnreadyTime == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownUnreadyTime = ptr.To("20m") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownUtilizationThreshold == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.ScaleDownUtilizationThreshold = ptr.To("0.5") + } + if mcp.Spec.Template.Spec.AutoScalerProfile.SkipNodesWithLocalStorage == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.SkipNodesWithLocalStorage = (*SkipNodesWithLocalStorage)(ptr.To(string(SkipNodesWithLocalStorageFalse))) + } + if mcp.Spec.Template.Spec.AutoScalerProfile.SkipNodesWithSystemPods == nil { + mcp.Spec.Template.Spec.AutoScalerProfile.SkipNodesWithSystemPods = (*SkipNodesWithSystemPods)(ptr.To(string(SkipNodesWithSystemPodsTrue))) + } +} diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_default_test.go b/api/v1beta1/azuremanagedcontrolplanetemplate_default_test.go new file mode 100644 index 00000000000..f3b51ef00dd --- /dev/null +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_default_test.go @@ -0,0 +1,424 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "encoding/json" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +func TestDefaultVirtualNetworkTemplate(t *testing.T) { + cases := []struct { + name string + controlPlaneTemplate *AzureManagedControlPlaneTemplate + outputTemplate *AzureManagedControlPlaneTemplate + }{ + { + name: "virtual network not specified", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{}, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Name: "test-cluster-template", + CIDRBlock: defaultAKSVnetCIDR, + }, + }, + }, + }, + }, + }, + { + name: "custom name", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Name: "custom-vnet-name", + }, + }, + }, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Name: "custom-vnet-name", + CIDRBlock: defaultAKSVnetCIDR, + }, + }, + }, + }, + }, + }, + { + name: "custom cidr block", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + CIDRBlock: "10.0.0.16/24", + }, + }, + }, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Name: "test-cluster-template", + CIDRBlock: "10.0.0.16/24", + }, + }, + }, + }, + }, + }, + } + + for _, c := range cases { + tc := c + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.controlPlaneTemplate.setDefaultVirtualNetwork() + if !reflect.DeepEqual(tc.controlPlaneTemplate, tc.outputTemplate) { + 
expected, _ := json.MarshalIndent(tc.outputTemplate, "", "\t") + actual, _ := json.MarshalIndent(tc.controlPlaneTemplate, "", "\t") + t.Errorf("Expected %s, got %s", string(expected), string(actual)) + } + }) + } +} + +func TestDefaultSubnetTemplate(t *testing.T) { + cases := []struct { + name string + controlPlaneTemplate *AzureManagedControlPlaneTemplate + outputTemplate *AzureManagedControlPlaneTemplate + }{ + { + name: "subnet not specified", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{}, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Subnet: ManagedControlPlaneSubnet{ + Name: "test-cluster-template", + CIDRBlock: defaultAKSNodeSubnetCIDR, + }, + }, + }, + }, + }, + }, + }, + { + name: "custom name", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Subnet: ManagedControlPlaneSubnet{ + Name: "custom-subnet-name", + }, + }, + }, + }, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Subnet: ManagedControlPlaneSubnet{ + Name: "custom-subnet-name", + CIDRBlock: defaultAKSNodeSubnetCIDR, + }, + }, + }, + }, + }, + }, + }, + { + name: "custom cidr block", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Subnet: ManagedControlPlaneSubnet{ + CIDRBlock: "10.0.0.16/24", + }, + }, + }, + }, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + VirtualNetwork: ManagedControlPlaneVirtualNetworkTemplate{ + Subnet: ManagedControlPlaneSubnet{ + Name: "test-cluster-template", + CIDRBlock: "10.0.0.16/24", + }, + }, + }, + }, + }, + }, + }, + } + + for _, c := range cases { + tc := c + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.controlPlaneTemplate.setDefaultSubnet() + if !reflect.DeepEqual(tc.controlPlaneTemplate, tc.outputTemplate) { + expected, _ := json.MarshalIndent(tc.outputTemplate, "", "\t") + actual, _ := json.MarshalIndent(tc.controlPlaneTemplate, "", "\t") + t.Errorf("Expected %s, got %s", string(expected), string(actual)) + } + }) + } +} + +func TestDefaultSkuTemplate(t *testing.T) { + 
cases := []struct { + name string + controlPlaneTemplate *AzureManagedControlPlaneTemplate + outputTemplate *AzureManagedControlPlaneTemplate + }{ + { + name: "sku not specified", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{}, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + SKU: &AKSSku{ + Tier: FreeManagedControlPlaneTier, + }, + }, + }, + }, + }, + }, + { + name: "paid sku", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + SKU: &AKSSku{ + Tier: PaidManagedControlPlaneTier, + }, + }, + }, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + SKU: &AKSSku{ + Tier: PaidManagedControlPlaneTier, + }, + }, + }, + }, + }, + }, + } + + for _, c := range cases { + tc := c + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.controlPlaneTemplate.setDefaultSku() + if !reflect.DeepEqual(tc.controlPlaneTemplate, tc.outputTemplate) { + expected, _ := json.MarshalIndent(tc.outputTemplate, "", "\t") + actual, _ := json.MarshalIndent(tc.controlPlaneTemplate, "", "\t") + t.Errorf("Expected %s, got %s", string(expected), string(actual)) + } + }) + } +} + +func TestDefaultAutoScalerProfile(t *testing.T) { + cases := []struct { + name string + controlPlaneTemplate *AzureManagedControlPlaneTemplate + outputTemplate *AzureManagedControlPlaneTemplate + }{ + { + name: "autoscaler profile not specified", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{}, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{}, + }, + }, + }, + { + name: "autoscaler profile empty but specified", + controlPlaneTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + AutoScalerProfile: &AutoScalerProfile{}, + }, + }, + }, + }, + outputTemplate: &AzureManagedControlPlaneTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-template", + }, + Spec: AzureManagedControlPlaneTemplateSpec{ + Template: AzureManagedControlPlaneTemplateResource{ + Spec: AzureManagedControlPlaneTemplateResourceSpec{ + AutoScalerProfile: &AutoScalerProfile{ + BalanceSimilarNodeGroups: (*BalanceSimilarNodeGroups)(ptr.To(string(BalanceSimilarNodeGroupsFalse))), + Expander: 
(*Expander)(ptr.To(string(ExpanderRandom))), + MaxEmptyBulkDelete: ptr.To("10"), + MaxGracefulTerminationSec: ptr.To("600"), + MaxNodeProvisionTime: ptr.To("15m"), + MaxTotalUnreadyPercentage: ptr.To("45"), + NewPodScaleUpDelay: ptr.To("0s"), + OkTotalUnreadyCount: ptr.To("3"), + ScanInterval: ptr.To("10s"), + ScaleDownDelayAfterAdd: ptr.To("10m"), + ScaleDownDelayAfterDelete: ptr.To("10s"), + ScaleDownDelayAfterFailure: ptr.To("3m"), + ScaleDownUnneededTime: ptr.To("10m"), + ScaleDownUnreadyTime: ptr.To("20m"), + ScaleDownUtilizationThreshold: ptr.To("0.5"), + SkipNodesWithLocalStorage: (*SkipNodesWithLocalStorage)(ptr.To(string(SkipNodesWithLocalStorageFalse))), + SkipNodesWithSystemPods: (*SkipNodesWithSystemPods)(ptr.To(string(SkipNodesWithSystemPodsTrue))), + }, + }, + }, + }, + }, + }, + } + + for _, c := range cases { + tc := c + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + tc.controlPlaneTemplate.setDefaultAutoScalerProfile() + if !reflect.DeepEqual(tc.controlPlaneTemplate, tc.outputTemplate) { + expected, _ := json.MarshalIndent(tc.outputTemplate, "", "\t") + actual, _ := json.MarshalIndent(tc.controlPlaneTemplate, "", "\t") + t.Errorf("Expected %s, got %s", string(expected), string(actual)) + } + }) + } +} diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_types.go b/api/v1beta1/azuremanagedcontrolplanetemplate_types.go new file mode 100644 index 00000000000..f5be513bfb4 --- /dev/null +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_types.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AzureManagedControlPlaneTemplateSpec defines the desired state of AzureManagedControlPlaneTemplate. +type AzureManagedControlPlaneTemplateSpec struct { + Template AzureManagedControlPlaneTemplateResource `json:"template"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=azuremanagedcontrolplanetemplates,scope=Namespaced,categories=cluster-api +// +kubebuilder:storageversion + +// AzureManagedControlPlaneTemplate is the Schema for the AzureManagedControlPlaneTemplates API. +type AzureManagedControlPlaneTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AzureManagedControlPlaneTemplateSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true + +// AzureManagedControlPlaneTemplateList contains a list of AzureManagedControlPlaneTemplates. +type AzureManagedControlPlaneTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AzureManagedControlPlaneTemplate `json:"items"` +} + +func init() { + SchemeBuilder.Register(&AzureManagedControlPlaneTemplate{}, &AzureManagedControlPlaneTemplateList{}) +} + +// AzureManagedControlPlaneTemplateResource describes the data needed to create an AzureManagedCluster from a template. 
+type AzureManagedControlPlaneTemplateResource struct { + Spec AzureManagedControlPlaneTemplateResourceSpec `json:"spec"` +} diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_validation.go b/api/v1beta1/azuremanagedcontrolplanetemplate_validation.go new file mode 100644 index 00000000000..23e681e6fc7 --- /dev/null +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_validation.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Validate the Azure Managed Control Plane Template and return an aggregate error. +func (mcp *AzureManagedControlPlaneTemplate) validateManagedControlPlaneTemplate(cli client.Client) error { + var allErrs field.ErrorList + + allErrs = append(allErrs, validateDNSServiceIP( + mcp.Spec.Template.Spec.DNSServiceIP, + field.NewPath("spec").Child("template").Child("spec").Child("DNSServiceIP"))...) + + allErrs = append(allErrs, validateVersion( + mcp.Spec.Template.Spec.Version, + field.NewPath("spec").Child("template").Child("spec").Child("Version"))...) + + allErrs = append(allErrs, validateLoadBalancerProfile( + mcp.Spec.Template.Spec.LoadBalancerProfile, + field.NewPath("spec").Child("template").Child("spec").Child("LoadBalancerProfile"))...) + + allErrs = append(allErrs, validateManagedClusterNetwork( + cli, + mcp.Labels, + mcp.Namespace, + mcp.Spec.Template.Spec.DNSServiceIP, + mcp.Spec.Template.Spec.VirtualNetwork.Subnet, + field.NewPath("spec").Child("template").Child("spec"))...) + + allErrs = append(allErrs, validateName(mcp.Name, field.NewPath("Name"))...) + + allErrs = append(allErrs, validateAutoScalerProfile(mcp.Spec.Template.Spec.AutoScalerProfile, field.NewPath("spec").Child("template").Child("spec").Child("AutoScalerProfile"))...) + + return allErrs.ToAggregate() +} diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go new file mode 100644 index 00000000000..4d8182ac223 --- /dev/null +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go @@ -0,0 +1,107 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/cluster-api-provider-azure/feature" + capifeature "sigs.k8s.io/cluster-api/feature" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// AzureManagedControlPlaneTemplateImmutableMsg is the message used for errors on fields that are immutable. +const AzureManagedControlPlaneTemplateImmutableMsg = "AzureManagedControlPlaneTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/change-clusterclass.html" + +// SetupAzureManagedControlPlaneTemplateWithManager will set up the webhook to be managed by the specified manager. +func SetupAzureManagedControlPlaneTemplateWithManager(mgr ctrl.Manager) error { + mcpw := &azureManagedControlPlaneTemplateWebhook{Client: mgr.GetClient()} + return ctrl.NewWebhookManagedBy(mgr). + For(&AzureManagedControlPlaneTemplate{}). + WithDefaulter(mcpw). + WithValidator(mcpw). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplanetemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanetemplates,versions=v1beta1,name=validation.azuremanagedcontrolplanetemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplanetemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanetemplates,versions=v1beta1,name=default.azuremanagedcontrolplanetemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +type azureManagedControlPlaneTemplateWebhook struct { + Client client.Client +} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. +func (mcpw *azureManagedControlPlaneTemplateWebhook) Default(ctx context.Context, obj runtime.Object) error { + mcp, ok := obj.(*AzureManagedControlPlaneTemplate) + if !ok { + return apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate") + } + mcp.setDefaults() + return nil +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + mcp, ok := obj.(*AzureManagedControlPlaneTemplate) + if !ok { + return nil, apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate") + } + // NOTE: AzureManagedControlPlane relies upon MachinePools, which is behind a feature gate flag. + // The webhook must prevent creating new objects in case the feature flag is disabled. + if !feature.Gates.Enabled(capifeature.MachinePool) { + return nil, field.Forbidden( + field.NewPath("spec"), + "can be set only if the Cluster API 'MachinePool' feature flag is enabled", + ) + } + + return nil, mcp.validateManagedControlPlaneTemplate(mcpw.Client) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
+func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+	var allErrs field.ErrorList
+	old, ok := oldObj.(*AzureManagedControlPlaneTemplate)
+	if !ok {
+		return nil, apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate")
+	}
+	mcp, ok := newObj.(*AzureManagedControlPlaneTemplate)
+	if !ok {
+		return nil, apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate")
+	}
+	if !reflect.DeepEqual(mcp.Spec.Template.Spec, old.Spec.Template.Spec) {
+		allErrs = append(allErrs,
+			field.Invalid(field.NewPath("AzureManagedControlPlaneTemplate", "spec", "template", "spec"), mcp, AzureManagedControlPlaneTemplateImmutableMsg),
+		)
+	}
+
+	if len(allErrs) == 0 {
+		return nil, nil
+	}
+	return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedControlPlaneTemplate").GroupKind(), mcp.Name, allErrs)
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
+func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateDelete(ctx context.Context, _ runtime.Object) (admission.Warnings, error) {
+	return nil, nil
+}
diff --git a/api/v1beta1/azuremanagedmachinepooltemplate_types.go b/api/v1beta1/azuremanagedmachinepooltemplate_types.go
new file mode 100644
index 00000000000..60520c74e14
--- /dev/null
+++ b/api/v1beta1/azuremanagedmachinepooltemplate_types.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AzureManagedMachinePoolTemplateSpec defines the desired state of AzureManagedMachinePoolTemplate.
+type AzureManagedMachinePoolTemplateSpec struct {
+	Template AzureManagedMachinePoolTemplateResource `json:"template"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=azuremanagedmachinepooltemplates,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+
+// AzureManagedMachinePoolTemplate is the Schema for the AzureManagedMachinePoolTemplates API.
+type AzureManagedMachinePoolTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec AzureManagedMachinePoolTemplateSpec `json:"spec,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AzureManagedMachinePoolTemplateList contains a list of AzureManagedMachinePoolTemplates.
+type AzureManagedMachinePoolTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []AzureManagedMachinePoolTemplate `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AzureManagedMachinePoolTemplate{}, &AzureManagedMachinePoolTemplateList{})
+}
+
+// AzureManagedMachinePoolTemplateResource describes the data needed to create an AzureManagedMachinePool from a template.
+type AzureManagedMachinePoolTemplateResource struct { + Spec AzureManagedMachinePoolTemplateResourceSpec `json:"spec"` +} diff --git a/api/v1beta1/consts.go b/api/v1beta1/consts.go index 419cd3d8c63..8d7f48daad5 100644 --- a/api/v1beta1/consts.go +++ b/api/v1beta1/consts.go @@ -166,3 +166,8 @@ const ( // value for the label is the CAPI Cluster Name. OwnedByClusterLabelKey = NameAzureProviderPrefix + string(ResourceLifecycleOwned) ) + +const ( + // CloudProviderName is the name of the Azure cloud provider. + CloudProviderName = "azure" +) diff --git a/api/v1beta1/types_template.go b/api/v1beta1/types_template.go index baed624f26d..3d1015e477b 100644 --- a/api/v1beta1/types_template.go +++ b/api/v1beta1/types_template.go @@ -18,9 +18,286 @@ package v1beta1 import ( "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/net" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +// AzureManagedControlPlaneTemplateResourceSpec specifies an Azure managed control plane template resource. +type AzureManagedControlPlaneTemplateResourceSpec struct { + // MachineTemplate contains information about how machines + // should be shaped when creating or updating a control plane. + // +optional + MachineTemplate *AzureManagedControlPlaneTemplateMachineTemplate `json:"machineTemplate,omitempty"` + + // Version defines the desired Kubernetes version. + // +kubebuilder:validation:MinLength:=2 + Version string `json:"version"` + + // VirtualNetwork describes the vnet for the AKS cluster. Will be created if it does not exist. + // +optional + VirtualNetwork ManagedControlPlaneVirtualNetworkTemplate `json:"virtualNetwork,omitempty"` + + // SubscriptionID is the GUID of the Azure subscription to hold this cluster. + // +optional + SubscriptionID string `json:"subscriptionID,omitempty"` + + // Location is a string matching one of the canonical Azure region names. Examples: "westus2", "eastus". + Location string `json:"location"` + + // AdditionalTags is an optional set of tags to add to Azure resources managed by the Azure provider, in addition to the + // ones added by default. + // +optional + AdditionalTags Tags `json:"additionalTags,omitempty"` + + // NetworkPlugin used for building Kubernetes network. + // +kubebuilder:validation:Enum=azure;kubenet + // +optional + NetworkPlugin *string `json:"networkPlugin,omitempty"` + + // NetworkPolicy used for building Kubernetes network. + // +kubebuilder:validation:Enum=azure;calico + // +optional + NetworkPolicy *string `json:"networkPolicy,omitempty"` + + // Outbound configuration used by Nodes. + // +kubebuilder:validation:Enum=loadBalancer;managedNATGateway;userAssignedNATGateway;userDefinedRouting + // +optional + OutboundType *ManagedControlPlaneOutboundType `json:"outboundType,omitempty"` + + // DNSServiceIP is an IP address assigned to the Kubernetes DNS service. + // It must be within the Kubernetes service address range specified in serviceCidr. + // +optional + DNSServiceIP *string `json:"dnsServiceIP,omitempty"` + + // LoadBalancerSKU is the SKU of the loadBalancer to be provisioned. 
+	// +kubebuilder:validation:Enum=Basic;Standard
+	// +optional
+	LoadBalancerSKU *string `json:"loadBalancerSKU,omitempty"`
+
+	// IdentityRef is a reference to a AzureClusterIdentity to be used when reconciling this cluster
+	// +optional
+	IdentityRef *corev1.ObjectReference `json:"identityRef,omitempty"`
+
+	// AadProfile is Azure Active Directory configuration to integrate with AKS for aad authentication.
+	// +optional
+	AADProfile *AADProfile `json:"aadProfile,omitempty"`
+
+	// AddonProfiles are the profiles of managed cluster add-on.
+	// +optional
+	AddonProfiles []AddonProfile `json:"addonProfiles,omitempty"`
+
+	// SKU is the SKU of the AKS to be provisioned.
+	// +optional
+	SKU *AKSSku `json:"sku,omitempty"`
+
+	// LoadBalancerProfile is the profile of the cluster load balancer.
+	// +optional
+	LoadBalancerProfile *LoadBalancerProfile `json:"loadBalancerProfile,omitempty"`
+
+	// APIServerAccessProfile is the access profile for AKS API server.
+	// +optional
+	APIServerAccessProfile *APIServerAccessProfileTemplate `json:"apiServerAccessProfile,omitempty"`
+
+	// AutoscalerProfile is the parameters to be applied to the cluster-autoscaler when enabled
+	// +optional
+	AutoScalerProfile *AutoScalerProfile `json:"autoscalerProfile,omitempty"`
+}
+
+// AzureManagedControlPlaneTemplateMachineTemplate specifies an Azure managed control plane template.
+type AzureManagedControlPlaneTemplateMachineTemplate struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
+
+	// NodeDrainTimeout is the total amount of time that the controller will spend on draining a controlplane node
+	// The default value is 0, meaning that the node can be drained without any time limitations.
+	// NOTE: NodeDrainTimeout is different from `kubectl drain --timeout`
+	// +optional
+	NodeDrainTimeout *metav1.Duration `json:"nodeDrainTimeout,omitempty"`
+
+	// NodeVolumeDetachTimeout is the total amount of time that the controller will spend on waiting for all volumes
+	// to be detached. The default value is 0, meaning that the volumes can be detached without any time limitations.
+	// +optional
+	NodeVolumeDetachTimeout *metav1.Duration `json:"nodeVolumeDetachTimeout,omitempty"`
+
+	// NodeDeletionTimeout defines how long the machine controller will attempt to delete the Node that the Machine
+	// hosts after the Machine is marked for deletion. A duration of 0 will retry deletion indefinitely.
+	// If no value is provided, the default value for this property of the Machine resource will be used.
+	// +optional
+	NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`
+}
+
+// AzureManagedMachinePoolTemplateResourceSpec specifies an Azure managed machine pool template resource.
+type AzureManagedMachinePoolTemplateResourceSpec struct {
+	// AdditionalTags is an optional set of tags to add to Azure resources managed by the
+	// Azure provider, in addition to the ones added by default.
+	// +optional
+	AdditionalTags Tags `json:"additionalTags,omitempty"`
+
+	// Name - name of the agent pool. If not specified, CAPZ uses the name of the CR as the agent pool name.
+	// Immutable.
+	// +optional
+	Name *string `json:"name,omitempty"`
+
+	// Mode - represents mode of an agent pool. Possible values include: System, User.
+ // +kubebuilder:validation:Enum=System;User + Mode string `json:"mode"` + + // SKU is the size of the VMs in the node pool. + // Immutable. + SKU string `json:"sku"` + + // OSDiskSizeGB is the disk size for every machine in this agent pool. + // If you specify 0, it will apply the default osDisk size according to the vmSize specified. + // Immutable. + // +optional + OSDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"` + + // AvailabilityZones - Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType. + // Immutable. + // +optional + AvailabilityZones []string `json:"availabilityZones,omitempty"` + + // Node labels - labels for all of the nodes present in node pool. + // See also [AKS doc]. + // + // [AKS doc]: https://learn.microsoft.com/azure/aks/use-labels + // +optional + NodeLabels map[string]string `json:"nodeLabels,omitempty"` + + // Taints specifies the taints for nodes present in this agent pool. + // See also [AKS doc]. + // + // [AKS doc]: https://learn.microsoft.com/azure/aks/use-multiple-node-pools#setting-node-pool-taints + // +optional + Taints Taints `json:"taints,omitempty"` + + // Scaling specifies the autoscaling parameters for the node pool. + // +optional + Scaling *ManagedMachinePoolScaling `json:"scaling,omitempty"` + + // MaxPods specifies the kubelet `--max-pods` configuration for the node pool. + // Immutable. + // See also [AKS doc], [K8s doc]. + // + // [AKS doc]: https://learn.microsoft.com/azure/aks/configure-azure-cni#configure-maximum---new-clusters + // [K8s doc]: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ + // +optional + MaxPods *int32 `json:"maxPods,omitempty"` + + // OsDiskType specifies the OS disk type for each node in the pool. Allowed values are 'Ephemeral' and 'Managed' (default). + // Immutable. + // See also [AKS doc]. + // + // [AKS doc]: https://learn.microsoft.com/azure/aks/cluster-configuration#ephemeral-os + // +kubebuilder:validation:Enum=Ephemeral;Managed + // +kubebuilder:default=Managed + // +optional + OsDiskType *string `json:"osDiskType,omitempty"` + + // EnableUltraSSD enables the storage type UltraSSD_LRS for the agent pool. + // Immutable. + // +optional + EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` + + // OSType specifies the virtual machine operating system. Default to Linux. Possible values include: 'Linux', 'Windows'. + // 'Windows' requires the AzureManagedControlPlane's `spec.networkPlugin` to be `azure`. + // Immutable. + // See also [AKS doc]. + // + // [AKS doc]: https://learn.microsoft.com/rest/api/aks/agent-pools/create-or-update?tabs=HTTP#ostype + // +kubebuilder:validation:Enum=Linux;Windows + // +optional + OSType *string `json:"osType,omitempty"` + + // EnableNodePublicIP controls whether or not nodes in the pool each have a public IP address. + // Immutable. + // +optional + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + + // NodePublicIPPrefixID specifies the public IP prefix resource ID which VM nodes should use IPs from. + // Immutable. + // +optional + NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` + + // ScaleSetPriority specifies the ScaleSetPriority value. Default to Regular. Possible values include: 'Regular', 'Spot' + // Immutable. + // +kubebuilder:validation:Enum=Regular;Spot + // +optional + ScaleSetPriority *string `json:"scaleSetPriority,omitempty"` + + // ScaleDownMode affects the cluster autoscaler behavior. Default to Delete. 
Possible values include: 'Deallocate', 'Delete' + // +kubebuilder:validation:Enum=Deallocate;Delete + // +kubebuilder:default=Delete + // +optional + ScaleDownMode *string `json:"scaleDownMode,omitempty"` + + // SpotMaxPrice defines max price to pay for spot instance. Possible values are any decimal value greater than zero or -1. + // If you set the max price to be -1, the VM won't be evicted based on price. The price for the VM will be the current price + // for spot or the price for a standard VM, which ever is less, as long as there's capacity and quota available. + // +optional + SpotMaxPrice *resource.Quantity `json:"spotMaxPrice,omitempty"` + + // KubeletConfig specifies the kubelet configurations for nodes. + // Immutable. + // +optional + KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` + + // KubeletDiskType specifies the kubelet disk type. Default to OS. Possible values include: 'OS', 'Temporary'. + // Requires Microsoft.ContainerService/KubeletDisk preview feature to be set. + // Immutable. + // See also [AKS doc]. + // + // [AKS doc]: https://learn.microsoft.com/rest/api/aks/agent-pools/create-or-update?tabs=HTTP#kubeletdisktype + // +kubebuilder:validation:Enum=OS;Temporary + // +optional + KubeletDiskType *KubeletDiskType `json:"kubeletDiskType,omitempty"` + + // LinuxOSConfig specifies the custom Linux OS settings and configurations. + // Immutable. + // +optional + LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` + + // SubnetName specifies the Subnet where the MachinePool will be placed + // Immutable. + // +optional + SubnetName *string `json:"subnetName,omitempty"` + + // EnableFIPS indicates whether FIPS is enabled on the node pool. + // Immutable. + // +optional + EnableFIPS *bool `json:"enableFIPS,omitempty"` +} + +// APIServerAccessProfileTemplate specifies an API server access profile template. +type APIServerAccessProfileTemplate struct { + // EnablePrivateCluster - Whether to create the cluster as a private cluster or not. + // +optional + EnablePrivateCluster *bool `json:"enablePrivateCluster,omitempty"` + // PrivateDNSZone - Private dns zone mode for private cluster. + // +kubebuilder:validation:Enum=System;None + // +optional + PrivateDNSZone *string `json:"privateDNSZone,omitempty"` + // EnablePrivateClusterPublicFQDN - Whether to create additional public FQDN for private cluster or not. + // +optional + EnablePrivateClusterPublicFQDN *bool `json:"enablePrivateClusterPublicFQDN,omitempty"` +} + +// ManagedControlPlaneVirtualNetworkTemplate specifies a managed control plane virtual network template. +type ManagedControlPlaneVirtualNetworkTemplate struct { + Name string `json:"name"` + CIDRBlock string `json:"cidrBlock"` + // +optional + Subnet ManagedControlPlaneSubnet `json:"subnet,omitempty"` +} + +// AzureManagedClusterTemplateResourceSpec specifies an Azure managed cluster template resource. +type AzureManagedClusterTemplateResourceSpec struct{} + // AzureClusterTemplateResourceSpec specifies an Azure cluster template resource. type AzureClusterTemplateResourceSpec struct { AzureClusterClassSpec `json:",inline"` diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 6e83abe36b1..99820e8dc96 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -99,6 +99,36 @@ func (in *APIServerAccessProfile) DeepCopy() *APIServerAccessProfile { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *APIServerAccessProfileTemplate) DeepCopyInto(out *APIServerAccessProfileTemplate) { + *out = *in + if in.EnablePrivateCluster != nil { + in, out := &in.EnablePrivateCluster, &out.EnablePrivateCluster + *out = new(bool) + **out = **in + } + if in.PrivateDNSZone != nil { + in, out := &in.PrivateDNSZone, &out.PrivateDNSZone + *out = new(string) + **out = **in + } + if in.EnablePrivateClusterPublicFQDN != nil { + in, out := &in.EnablePrivateClusterPublicFQDN, &out.EnablePrivateClusterPublicFQDN + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerAccessProfileTemplate. +func (in *APIServerAccessProfileTemplate) DeepCopy() *APIServerAccessProfileTemplate { + if in == nil { + return nil + } + out := new(APIServerAccessProfileTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdditionalCapabilities) DeepCopyInto(out *AdditionalCapabilities) { *out = *in @@ -1089,6 +1119,111 @@ func (in *AzureManagedClusterStatus) DeepCopy() *AzureManagedClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterTemplate) DeepCopyInto(out *AzureManagedClusterTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterTemplate. +func (in *AzureManagedClusterTemplate) DeepCopy() *AzureManagedClusterTemplate { + if in == nil { + return nil + } + out := new(AzureManagedClusterTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedClusterTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterTemplateList) DeepCopyInto(out *AzureManagedClusterTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedClusterTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterTemplateList. +func (in *AzureManagedClusterTemplateList) DeepCopy() *AzureManagedClusterTemplateList { + if in == nil { + return nil + } + out := new(AzureManagedClusterTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedClusterTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedClusterTemplateResource) DeepCopyInto(out *AzureManagedClusterTemplateResource) { + *out = *in + out.Spec = in.Spec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterTemplateResource. +func (in *AzureManagedClusterTemplateResource) DeepCopy() *AzureManagedClusterTemplateResource { + if in == nil { + return nil + } + out := new(AzureManagedClusterTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterTemplateResourceSpec) DeepCopyInto(out *AzureManagedClusterTemplateResourceSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterTemplateResourceSpec. +func (in *AzureManagedClusterTemplateResourceSpec) DeepCopy() *AzureManagedClusterTemplateResourceSpec { + if in == nil { + return nil + } + out := new(AzureManagedClusterTemplateResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedClusterTemplateSpec) DeepCopyInto(out *AzureManagedClusterTemplateSpec) { + *out = *in + out.Template = in.Template +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedClusterTemplateSpec. +func (in *AzureManagedClusterTemplateSpec) DeepCopy() *AzureManagedClusterTemplateSpec { + if in == nil { + return nil + } + out := new(AzureManagedClusterTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzureManagedControlPlane) DeepCopyInto(out *AzureManagedControlPlane) { *out = *in @@ -1301,6 +1436,217 @@ func (in *AzureManagedControlPlaneStatus) DeepCopy() *AzureManagedControlPlaneSt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneTemplate) DeepCopyInto(out *AzureManagedControlPlaneTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneTemplate. +func (in *AzureManagedControlPlaneTemplate) DeepCopy() *AzureManagedControlPlaneTemplate { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedControlPlaneTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedControlPlaneTemplateList) DeepCopyInto(out *AzureManagedControlPlaneTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedControlPlaneTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneTemplateList. +func (in *AzureManagedControlPlaneTemplateList) DeepCopy() *AzureManagedControlPlaneTemplateList { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedControlPlaneTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneTemplateMachineTemplate) DeepCopyInto(out *AzureManagedControlPlaneTemplateMachineTemplate) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.NodeDrainTimeout != nil { + in, out := &in.NodeDrainTimeout, &out.NodeDrainTimeout + *out = new(v1.Duration) + **out = **in + } + if in.NodeVolumeDetachTimeout != nil { + in, out := &in.NodeVolumeDetachTimeout, &out.NodeVolumeDetachTimeout + *out = new(v1.Duration) + **out = **in + } + if in.NodeDeletionTimeout != nil { + in, out := &in.NodeDeletionTimeout, &out.NodeDeletionTimeout + *out = new(v1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneTemplateMachineTemplate. +func (in *AzureManagedControlPlaneTemplateMachineTemplate) DeepCopy() *AzureManagedControlPlaneTemplateMachineTemplate { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneTemplateMachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneTemplateResource) DeepCopyInto(out *AzureManagedControlPlaneTemplateResource) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneTemplateResource. +func (in *AzureManagedControlPlaneTemplateResource) DeepCopy() *AzureManagedControlPlaneTemplateResource { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedControlPlaneTemplateResourceSpec) DeepCopyInto(out *AzureManagedControlPlaneTemplateResourceSpec) { + *out = *in + if in.MachineTemplate != nil { + in, out := &in.MachineTemplate, &out.MachineTemplate + *out = new(AzureManagedControlPlaneTemplateMachineTemplate) + (*in).DeepCopyInto(*out) + } + in.VirtualNetwork.DeepCopyInto(&out.VirtualNetwork) + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NetworkPlugin != nil { + in, out := &in.NetworkPlugin, &out.NetworkPlugin + *out = new(string) + **out = **in + } + if in.NetworkPolicy != nil { + in, out := &in.NetworkPolicy, &out.NetworkPolicy + *out = new(string) + **out = **in + } + if in.OutboundType != nil { + in, out := &in.OutboundType, &out.OutboundType + *out = new(ManagedControlPlaneOutboundType) + **out = **in + } + if in.DNSServiceIP != nil { + in, out := &in.DNSServiceIP, &out.DNSServiceIP + *out = new(string) + **out = **in + } + if in.LoadBalancerSKU != nil { + in, out := &in.LoadBalancerSKU, &out.LoadBalancerSKU + *out = new(string) + **out = **in + } + if in.IdentityRef != nil { + in, out := &in.IdentityRef, &out.IdentityRef + *out = new(corev1.ObjectReference) + **out = **in + } + if in.AADProfile != nil { + in, out := &in.AADProfile, &out.AADProfile + *out = new(AADProfile) + (*in).DeepCopyInto(*out) + } + if in.AddonProfiles != nil { + in, out := &in.AddonProfiles, &out.AddonProfiles + *out = make([]AddonProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SKU != nil { + in, out := &in.SKU, &out.SKU + *out = new(AKSSku) + **out = **in + } + if in.LoadBalancerProfile != nil { + in, out := &in.LoadBalancerProfile, &out.LoadBalancerProfile + *out = new(LoadBalancerProfile) + (*in).DeepCopyInto(*out) + } + if in.APIServerAccessProfile != nil { + in, out := &in.APIServerAccessProfile, &out.APIServerAccessProfile + *out = new(APIServerAccessProfileTemplate) + (*in).DeepCopyInto(*out) + } + if in.AutoScalerProfile != nil { + in, out := &in.AutoScalerProfile, &out.AutoScalerProfile + *out = new(AutoScalerProfile) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneTemplateResourceSpec. +func (in *AzureManagedControlPlaneTemplateResourceSpec) DeepCopy() *AzureManagedControlPlaneTemplateResourceSpec { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneTemplateResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedControlPlaneTemplateSpec) DeepCopyInto(out *AzureManagedControlPlaneTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneTemplateSpec. +func (in *AzureManagedControlPlaneTemplateSpec) DeepCopy() *AzureManagedControlPlaneTemplateSpec { + if in == nil { + return nil + } + out := new(AzureManagedControlPlaneTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AzureManagedMachinePool) DeepCopyInto(out *AzureManagedMachinePool) { *out = *in @@ -1526,6 +1872,220 @@ func (in *AzureManagedMachinePoolStatus) DeepCopy() *AzureManagedMachinePoolStat return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolTemplate) DeepCopyInto(out *AzureManagedMachinePoolTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolTemplate. +func (in *AzureManagedMachinePoolTemplate) DeepCopy() *AzureManagedMachinePoolTemplate { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedMachinePoolTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolTemplateList) DeepCopyInto(out *AzureManagedMachinePoolTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AzureManagedMachinePoolTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolTemplateList. +func (in *AzureManagedMachinePoolTemplateList) DeepCopy() *AzureManagedMachinePoolTemplateList { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureManagedMachinePoolTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureManagedMachinePoolTemplateResource) DeepCopyInto(out *AzureManagedMachinePoolTemplateResource) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolTemplateResource. +func (in *AzureManagedMachinePoolTemplateResource) DeepCopy() *AzureManagedMachinePoolTemplateResource { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolTemplateResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedMachinePoolTemplateResourceSpec) DeepCopyInto(out *AzureManagedMachinePoolTemplateResourceSpec) { + *out = *in + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.OSDiskSizeGB != nil { + in, out := &in.OSDiskSizeGB, &out.OSDiskSizeGB + *out = new(int32) + **out = **in + } + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeLabels != nil { + in, out := &in.NodeLabels, &out.NodeLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make(Taints, len(*in)) + copy(*out, *in) + } + if in.Scaling != nil { + in, out := &in.Scaling, &out.Scaling + *out = new(ManagedMachinePoolScaling) + (*in).DeepCopyInto(*out) + } + if in.MaxPods != nil { + in, out := &in.MaxPods, &out.MaxPods + *out = new(int32) + **out = **in + } + if in.OsDiskType != nil { + in, out := &in.OsDiskType, &out.OsDiskType + *out = new(string) + **out = **in + } + if in.EnableUltraSSD != nil { + in, out := &in.EnableUltraSSD, &out.EnableUltraSSD + *out = new(bool) + **out = **in + } + if in.OSType != nil { + in, out := &in.OSType, &out.OSType + *out = new(string) + **out = **in + } + if in.EnableNodePublicIP != nil { + in, out := &in.EnableNodePublicIP, &out.EnableNodePublicIP + *out = new(bool) + **out = **in + } + if in.NodePublicIPPrefixID != nil { + in, out := &in.NodePublicIPPrefixID, &out.NodePublicIPPrefixID + *out = new(string) + **out = **in + } + if in.ScaleSetPriority != nil { + in, out := &in.ScaleSetPriority, &out.ScaleSetPriority + *out = new(string) + **out = **in + } + if in.ScaleDownMode != nil { + in, out := &in.ScaleDownMode, &out.ScaleDownMode + *out = new(string) + **out = **in + } + if in.SpotMaxPrice != nil { + in, out := &in.SpotMaxPrice, &out.SpotMaxPrice + x := (*in).DeepCopy() + *out = &x + } + if in.KubeletConfig != nil { + in, out := &in.KubeletConfig, &out.KubeletConfig + *out = new(KubeletConfig) + (*in).DeepCopyInto(*out) + } + if in.KubeletDiskType != nil { + in, out := &in.KubeletDiskType, &out.KubeletDiskType + *out = new(KubeletDiskType) + **out = **in + } + if in.LinuxOSConfig != nil { + in, out := &in.LinuxOSConfig, &out.LinuxOSConfig + *out = new(LinuxOSConfig) + (*in).DeepCopyInto(*out) + } + if in.SubnetName != nil { + in, out := &in.SubnetName, &out.SubnetName + *out = new(string) + **out = **in + } + if in.EnableFIPS != nil { + in, out := &in.EnableFIPS, &out.EnableFIPS + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolTemplateResourceSpec. +func (in *AzureManagedMachinePoolTemplateResourceSpec) DeepCopy() *AzureManagedMachinePoolTemplateResourceSpec { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolTemplateResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureManagedMachinePoolTemplateSpec) DeepCopyInto(out *AzureManagedMachinePoolTemplateSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedMachinePoolTemplateSpec. +func (in *AzureManagedMachinePoolTemplateSpec) DeepCopy() *AzureManagedMachinePoolTemplateSpec { + if in == nil { + return nil + } + out := new(AzureManagedMachinePoolTemplateSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzureMarketplaceImage) DeepCopyInto(out *AzureMarketplaceImage) { *out = *in @@ -2241,6 +2801,22 @@ func (in *ManagedControlPlaneVirtualNetwork) DeepCopy() *ManagedControlPlaneVirt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedControlPlaneVirtualNetworkTemplate) DeepCopyInto(out *ManagedControlPlaneVirtualNetworkTemplate) { + *out = *in + in.Subnet.DeepCopyInto(&out.Subnet) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedControlPlaneVirtualNetworkTemplate. +func (in *ManagedControlPlaneVirtualNetworkTemplate) DeepCopy() *ManagedControlPlaneVirtualNetworkTemplate { + if in == nil { + return nil + } + out := new(ManagedControlPlaneVirtualNetworkTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedDiskParameters) DeepCopyInto(out *ManagedDiskParameters) { *out = *in diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml new file mode 100644 index 00000000000..cbde332f327 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: azuremanagedclustertemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedClusterTemplate + listKind: AzureManagedClusterTemplateList + plural: azuremanagedclustertemplates + singular: azuremanagedclustertemplate + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AzureManagedClusterTemplate is the Schema for the AzureManagedClusterTemplates + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedClusterTemplateSpec defines the desired state + of AzureManagedClusterTemplate. 
+ properties: + template: + description: AzureManagedClusterTemplateResource describes the data + needed to create an AzureManagedCluster from a template. + properties: + spec: + description: AzureManagedClusterTemplateResourceSpec specifies + an Azure managed cluster template resource. + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml index f0e7e06b802..2010da0f89a 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml @@ -682,6 +682,11 @@ spec: ready: description: Ready is true when the provider resource is ready. type: boolean + replicas: + description: Total number of non-terminated machines targeted by this + control plane (their labels match the selector). + format: int32 + type: integer type: object type: object served: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml new file mode 100644 index 00000000000..b992a4cef59 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml @@ -0,0 +1,557 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: azuremanagedcontrolplanetemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedControlPlaneTemplate + listKind: AzureManagedControlPlaneTemplateList + plural: azuremanagedcontrolplanetemplates + singular: azuremanagedcontrolplanetemplate + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AzureManagedControlPlaneTemplate is the Schema for the AzureManagedControlPlaneTemplates + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedControlPlaneTemplateSpec defines the desired + state of AzureManagedControlPlaneTemplate. + properties: + template: + description: AzureManagedControlPlaneTemplateResource describes the + data needed to create an AzureManagedCluster from a template. + properties: + spec: + description: AzureManagedControlPlaneTemplateResourceSpec specifies + an Azure managed control plane template resource. + properties: + aadProfile: + description: AadProfile is Azure Active Directory configuration + to integrate with AKS for aad authentication. 
+ properties: + adminGroupObjectIDs: + description: AdminGroupObjectIDs - AAD group object IDs + that will have admin role of the cluster. + items: + type: string + type: array + managed: + description: Managed - Whether to enable managed AAD. + type: boolean + required: + - adminGroupObjectIDs + - managed + type: object + additionalTags: + additionalProperties: + type: string + description: AdditionalTags is an optional set of tags to + add to Azure resources managed by the Azure provider, in + addition to the ones added by default. + type: object + addonProfiles: + description: AddonProfiles are the profiles of managed cluster + add-on. + items: + description: AddonProfile represents a managed cluster add-on. + properties: + config: + additionalProperties: + type: string + description: Config - Key-value pairs for configuring + the add-on. + type: object + enabled: + description: Enabled - Whether the add-on is enabled + or not. + type: boolean + name: + description: Name - The name of the managed cluster + add-on. + type: string + required: + - enabled + - name + type: object + type: array + apiServerAccessProfile: + description: APIServerAccessProfile is the access profile + for AKS API server. + properties: + enablePrivateCluster: + description: EnablePrivateCluster - Whether to create + the cluster as a private cluster or not. + type: boolean + enablePrivateClusterPublicFQDN: + description: EnablePrivateClusterPublicFQDN - Whether + to create additional public FQDN for private cluster + or not. + type: boolean + privateDNSZone: + description: PrivateDNSZone - Private dns zone mode for + private cluster. + enum: + - System + - None + type: string + type: object + autoscalerProfile: + description: AutoscalerProfile is the parameters to be applied + to the cluster-autoscaler when enabled + properties: + balanceSimilarNodeGroups: + description: BalanceSimilarNodeGroups - Valid values are + 'true' and 'false'. The default is false. + enum: + - "true" + - "false" + type: string + expander: + description: Expander - If not specified, the default + is 'random'. See [expanders](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-expanders) + for more information. + enum: + - least-waste + - most-pods + - priority + - random + type: string + maxEmptyBulkDelete: + description: MaxEmptyBulkDelete - The default is 10. + type: string + maxGracefulTerminationSec: + description: MaxGracefulTerminationSec - The default is + 600. + pattern: ^(\d+)$ + type: string + maxNodeProvisionTime: + description: MaxNodeProvisionTime - The default is '15m'. + Values must be an integer followed by an 'm'. No unit + of time other than minutes (m) is supported. + pattern: ^(\d+)m$ + type: string + maxTotalUnreadyPercentage: + description: MaxTotalUnreadyPercentage - The default is + 45. The maximum is 100 and the minimum is 0. + maxLength: 3 + minLength: 1 + pattern: ^(\d+)$ + type: string + newPodScaleUpDelay: + description: NewPodScaleUpDelay - For scenarios like burst/batch + scale where you don't want CA to act before the kubernetes + scheduler could schedule all the pods, you can tell + CA to ignore unscheduled pods before they're a certain + age. The default is '0s'. Values must be an integer + followed by a unit ('s' for seconds, 'm' for minutes, + 'h' for hours, etc). + type: string + okTotalUnreadyCount: + description: OkTotalUnreadyCount - This must be an integer. + The default is 3. 
+ pattern: ^(\d+)$ + type: string + scaleDownDelayAfterAdd: + description: ScaleDownDelayAfterAdd - The default is '10m'. + Values must be an integer followed by an 'm'. No unit + of time other than minutes (m) is supported. + pattern: ^(\d+)m$ + type: string + scaleDownDelayAfterDelete: + description: ScaleDownDelayAfterDelete - The default is + the scan-interval. Values must be an integer followed + by an 's'. No unit of time other than seconds (s) is + supported. + pattern: ^(\d+)s$ + type: string + scaleDownDelayAfterFailure: + description: ScaleDownDelayAfterFailure - The default + is '3m'. Values must be an integer followed by an 'm'. + No unit of time other than minutes (m) is supported. + pattern: ^(\d+)m$ + type: string + scaleDownUnneededTime: + description: ScaleDownUnneededTime - The default is '10m'. + Values must be an integer followed by an 'm'. No unit + of time other than minutes (m) is supported. + pattern: ^(\d+)m$ + type: string + scaleDownUnreadyTime: + description: ScaleDownUnreadyTime - The default is '20m'. + Values must be an integer followed by an 'm'. No unit + of time other than minutes (m) is supported. + pattern: ^(\d+)m$ + type: string + scaleDownUtilizationThreshold: + description: ScaleDownUtilizationThreshold - The default + is '0.5'. + type: string + scanInterval: + description: ScanInterval - How often cluster is reevaluated + for scale up or down. The default is '10s'. + pattern: ^(\d+)s$ + type: string + skipNodesWithLocalStorage: + description: SkipNodesWithLocalStorage - The default is + false. + enum: + - "true" + - "false" + type: string + skipNodesWithSystemPods: + description: SkipNodesWithSystemPods - The default is + true. + enum: + - "true" + - "false" + type: string + type: object + dnsServiceIP: + description: DNSServiceIP is an IP address assigned to the + Kubernetes DNS service. It must be within the Kubernetes + service address range specified in serviceCidr. + type: string + identityRef: + description: IdentityRef is a reference to a AzureClusterIdentity + to be used when reconciling this cluster + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this + pod). This syntax is chosen only to have some well-defined + way of referencing a part of an object. TODO: this design + is not final and this field is subject to change in + the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + loadBalancerProfile: + description: LoadBalancerProfile is the profile of the cluster + load balancer. + properties: + allocatedOutboundPorts: + description: AllocatedOutboundPorts - Desired number of + allocated SNAT ports per VM. Allowed values must be + in the range of 0 to 64000 (inclusive). The default + value is 0 which results in Azure dynamically allocating + ports. + format: int32 + type: integer + idleTimeoutInMinutes: + description: IdleTimeoutInMinutes - Desired outbound flow + idle timeout in minutes. Allowed values must be in the + range of 4 to 120 (inclusive). The default value is + 30 minutes. + format: int32 + type: integer + managedOutboundIPs: + description: ManagedOutboundIPs - Desired managed outbound + IPs for the cluster load balancer. + format: int32 + type: integer + outboundIPPrefixes: + description: OutboundIPPrefixes - Desired outbound IP + Prefix resources for the cluster load balancer. + items: + type: string + type: array + outboundIPs: + description: OutboundIPs - Desired outbound IP resources + for the cluster load balancer. + items: + type: string + type: array + type: object + loadBalancerSKU: + description: LoadBalancerSKU is the SKU of the loadBalancer + to be provisioned. + enum: + - Basic + - Standard + type: string + location: + description: 'Location is a string matching one of the canonical + Azure region names. Examples: "westus2", "eastus".' + type: string + machineTemplate: + description: MachineTemplate contains information about how + machines should be shaped when creating or updating a control + plane. + properties: + metadata: + description: 'Standard object''s metadata. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value + map stored with a resource that may be set by external + tools to store and retrieve arbitrary metadata. + They are not queryable and should be preserved when + modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can + be used to organize and categorize (scope and select) + objects. May match selectors of replication controllers + and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + type: object + nodeDeletionTimeout: + description: NodeDeletionTimeout defines how long the + machine controller will attempt to delete the Node that + the Machine hosts after the Machine is marked for deletion. + A duration of 0 will retry deletion indefinitely. If + no value is provided, the default value for this property + of the Machine resource will be used. + type: string + nodeDrainTimeout: + description: 'NodeDrainTimeout is the total amount of + time that the controller will spend on draining a controlplane + node The default value is 0, meaning that the node can + be drained without any time limitations. 
NOTE: NodeDrainTimeout + is different from `kubectl drain --timeout`' + type: string + nodeVolumeDetachTimeout: + description: NodeVolumeDetachTimeout is the total amount + of time that the controller will spend on waiting for + all volumes to be detached. The default value is 0, + meaning that the volumes can be detached without any + time limitations. + type: string + type: object + networkPlugin: + description: NetworkPlugin used for building Kubernetes network. + enum: + - azure + - kubenet + type: string + networkPolicy: + description: NetworkPolicy used for building Kubernetes network. + enum: + - azure + - calico + type: string + outboundType: + description: Outbound configuration used by Nodes. + enum: + - loadBalancer + - managedNATGateway + - userAssignedNATGateway + - userDefinedRouting + type: string + sku: + description: SKU is the SKU of the AKS to be provisioned. + properties: + tier: + description: Tier - Tier of an AKS cluster. + enum: + - Free + - Paid + - Standard + type: string + required: + - tier + type: object + subscriptionID: + description: SubscriptionID is the GUID of the Azure subscription + to hold this cluster. + type: string + version: + description: Version defines the desired Kubernetes version. + minLength: 2 + type: string + virtualNetwork: + description: VirtualNetwork describes the vnet for the AKS + cluster. Will be created if it does not exist. + properties: + cidrBlock: + type: string + name: + type: string + subnet: + description: ManagedControlPlaneSubnet describes a subnet + for an AKS cluster. + properties: + cidrBlock: + type: string + name: + type: string + privateEndpoints: + description: PrivateEndpoints is a slice of Virtual + Network private endpoints to create for the subnets. + items: + description: PrivateEndpointSpec configures an Azure + Private Endpoint. + properties: + applicationSecurityGroups: + description: ApplicationSecurityGroups specifies + the Application security group in which the + private endpoint IP configuration is included. + items: + type: string + type: array + customNetworkInterfaceName: + description: CustomNetworkInterfaceName specifies + the network interface name associated with + the private endpoint. + type: string + location: + description: Location specifies the region to + create the private endpoint. + type: string + manualApproval: + description: ManualApproval specifies if the + connection approval needs to be done manually + or not. Set it true when the network admin + does not have access to approve connections + to the remote resource. Defaults to false. + type: boolean + name: + description: Name specifies the name of the + private endpoint. + type: string + privateIPAddresses: + description: PrivateIPAddresses specifies the + IP addresses for the network interface associated + with the private endpoint. They have to be + part of the subnet where the private endpoint + is linked. + items: + type: string + type: array + privateLinkServiceConnections: + description: PrivateLinkServiceConnections specifies + Private Link Service Connections of the private + endpoint. + items: + description: PrivateLinkServiceConnection + defines the specification for a private + link service connection associated with + a private endpoint. + properties: + groupIDs: + description: GroupIDs specifies the ID(s) + of the group(s) obtained from the remote + resource that this private endpoint + should connect to. 
+ items: + type: string + type: array + name: + description: Name specifies the name of + the private link service. + type: string + privateLinkServiceID: + description: PrivateLinkServiceID specifies + the resource ID of the private link + service. + type: string + requestMessage: + description: RequestMessage specifies + a message passed to the owner of the + remote resource with the private endpoint + connection request. + maxLength: 140 + type: string + type: object + type: array + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + serviceEndpoints: + description: ServiceEndpoints is a slice of Virtual + Network service endpoints to enable for the subnets. + items: + description: ServiceEndpointSpec configures an Azure + Service Endpoint. + properties: + locations: + items: + type: string + type: array + service: + type: string + required: + - locations + - service + type: object + type: array + x-kubernetes-list-map-keys: + - service + x-kubernetes-list-type: map + required: + - cidrBlock + - name + type: object + required: + - cidrBlock + - name + type: object + required: + - location + - version + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: true diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml new file mode 100644 index 00000000000..29761720eaa --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml @@ -0,0 +1,612 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: AzureManagedMachinePoolTemplate + listKind: AzureManagedMachinePoolTemplateList + plural: azuremanagedmachinepooltemplates + singular: azuremanagedmachinepooltemplate + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: AzureManagedMachinePoolTemplate is the Schema for the AzureManagedMachinePoolTemplates + API. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AzureManagedMachinePoolTemplateSpec defines the desired state + of AzureManagedMachinePoolTemplate. + properties: + template: + description: AzureManagedMachinePoolTemplateResource describes the + data needed to create an AzureManagedCluster from a template. + properties: + spec: + description: AzureManagedMachinePoolTemplateResourceSpec specifies + an Azure managed control plane template resource. 
+ properties: + additionalTags: + additionalProperties: + type: string + description: AdditionalTags is an optional set of tags to + add to Azure resources managed by the Azure provider, in + addition to the ones added by default. + type: object + availabilityZones: + description: AvailabilityZones - Availability zones for nodes. + Must use VirtualMachineScaleSets AgentPoolType. Immutable. + items: + type: string + type: array + enableFIPS: + description: EnableFIPS indicates whether FIPS is enabled + on the node pool. Immutable. + type: boolean + enableNodePublicIP: + description: EnableNodePublicIP controls whether or not nodes + in the pool each have a public IP address. Immutable. + type: boolean + enableUltraSSD: + description: EnableUltraSSD enables the storage type UltraSSD_LRS + for the agent pool. Immutable. + type: boolean + kubeletConfig: + description: KubeletConfig specifies the kubelet configurations + for nodes. Immutable. + properties: + allowedUnsafeSysctls: + description: AllowedUnsafeSysctls - Allowlist of unsafe + sysctls or unsafe sysctl patterns (ending in `*`). Valid + values match `kernel.shm*`, `kernel.msg*`, `kernel.sem`, + `fs.mqueue.*`, or `net.*`. + items: + type: string + type: array + containerLogMaxFiles: + description: ContainerLogMaxFiles - The maximum number + of container log files that can be present for a container. + The number must be ≥ 2. + format: int32 + minimum: 2 + type: integer + containerLogMaxSizeMB: + description: ContainerLogMaxSizeMB - The maximum size + in MB of a container log file before it is rotated. + format: int32 + type: integer + cpuCfsQuota: + description: CPUCfsQuota - Enable CPU CFS quota enforcement + for containers that specify CPU limits. + type: boolean + cpuCfsQuotaPeriod: + description: CPUCfsQuotaPeriod - Sets CPU CFS quota period + value. Must end in "ms", e.g. "100ms" + type: string + cpuManagerPolicy: + description: CPUManagerPolicy - CPU Manager policy to + use. + enum: + - none + - static + type: string + failSwapOn: + description: FailSwapOn - If set to true it will make + the Kubelet fail to start if swap is enabled on the + node. + type: boolean + imageGcHighThreshold: + description: ImageGcHighThreshold - The percent of disk + usage after which image garbage collection is always + run. Valid values are 0-100 (inclusive). + format: int32 + maximum: 100 + minimum: 0 + type: integer + imageGcLowThreshold: + description: ImageGcLowThreshold - The percent of disk + usage before which image garbage collection is never + run. Valid values are 0-100 (inclusive) and must be + less than `imageGcHighThreshold`. + format: int32 + maximum: 100 + minimum: 0 + type: integer + podMaxPids: + description: PodMaxPids - The maximum number of processes + per pod. Must not exceed kernel PID limit. -1 disables + the limit. + format: int32 + minimum: -1 + type: integer + topologyManagerPolicy: + description: TopologyManagerPolicy - Topology Manager + policy to use. + enum: + - none + - best-effort + - restricted + - single-numa-node + type: string + type: object + kubeletDiskType: + description: "KubeletDiskType specifies the kubelet disk type. + Default to OS. Possible values include: 'OS', 'Temporary'. + Requires Microsoft.ContainerService/KubeletDisk preview + feature to be set. Immutable. See also [AKS doc]. 
\n [AKS + doc]: https://learn.microsoft.com/rest/api/aks/agent-pools/create-or-update?tabs=HTTP#kubeletdisktype" + enum: + - OS + - Temporary + type: string + linuxOSConfig: + description: LinuxOSConfig specifies the custom Linux OS settings + and configurations. Immutable. + properties: + swapFileSizeMB: + description: "SwapFileSizeMB specifies size in MB of a + swap file will be created on the agent nodes from this + node pool. Max value of SwapFileSizeMB should be the + size of temporary disk(/dev/sdb). Must be at least 1. + See also [AKS doc]. \n [AKS doc]: https://learn.microsoft.com/azure/virtual-machines/managed-disks-overview#temporary-disk" + format: int32 + minimum: 1 + type: integer + sysctls: + description: Sysctl specifies the settings for Linux agent + nodes. + properties: + fsAioMaxNr: + description: FsAioMaxNr specifies the maximum number + of system-wide asynchronous io requests. Valid values + are 65536-6553500 (inclusive). Maps to fs.aio-max-nr. + format: int32 + maximum: 6553500 + minimum: 65536 + type: integer + fsFileMax: + description: FsFileMax specifies the max number of + file-handles that the Linux kernel will allocate, + by increasing increases the maximum number of open + files permitted. Valid values are 8192-12000500 + (inclusive). Maps to fs.file-max. + format: int32 + maximum: 12000500 + minimum: 8192 + type: integer + fsInotifyMaxUserWatches: + description: FsInotifyMaxUserWatches specifies the + number of file watches allowed by the system. Each + watch is roughly 90 bytes on a 32-bit kernel, and + roughly 160 bytes on a 64-bit kernel. Valid values + are 781250-2097152 (inclusive). Maps to fs.inotify.max_user_watches. + format: int32 + maximum: 2097152 + minimum: 781250 + type: integer + fsNrOpen: + description: FsNrOpen specifies the maximum number + of file-handles a process can allocate. Valid values + are 8192-20000500 (inclusive). Maps to fs.nr_open. + format: int32 + maximum: 20000500 + minimum: 8192 + type: integer + kernelThreadsMax: + description: KernelThreadsMax specifies the maximum + number of all threads that can be created. Valid + values are 20-513785 (inclusive). Maps to kernel.threads-max. + format: int32 + maximum: 513785 + minimum: 20 + type: integer + netCoreNetdevMaxBacklog: + description: NetCoreNetdevMaxBacklog specifies maximum + number of packets, queued on the INPUT side, when + the interface receives packets faster than kernel + can process them. Valid values are 1000-3240000 + (inclusive). Maps to net.core.netdev_max_backlog. + format: int32 + maximum: 3240000 + minimum: 1000 + type: integer + netCoreOptmemMax: + description: NetCoreOptmemMax specifies the maximum + ancillary buffer size (option memory buffer) allowed + per socket. Socket option memory is used in a few + cases to store extra structures relating to usage + of the socket. Valid values are 20480-4194304 (inclusive). + Maps to net.core.optmem_max. + format: int32 + maximum: 4194304 + minimum: 20480 + type: integer + netCoreRmemDefault: + description: NetCoreRmemDefault specifies the default + receive socket buffer size in bytes. Valid values + are 212992-134217728 (inclusive). Maps to net.core.rmem_default. + format: int32 + maximum: 134217728 + minimum: 212992 + type: integer + netCoreRmemMax: + description: NetCoreRmemMax specifies the maximum + receive socket buffer size in bytes. Valid values + are 212992-134217728 (inclusive). Maps to net.core.rmem_max. 
+ format: int32 + maximum: 134217728 + minimum: 212992 + type: integer + netCoreSomaxconn: + description: NetCoreSomaxconn specifies maximum number + of connection requests that can be queued for any + given listening socket. An upper limit for the value + of the backlog parameter passed to the listen(2)(https://man7.org/linux/man-pages/man2/listen.2.html) + function. If the backlog argument is greater than + the somaxconn, then it's silently truncated to this + limit. Valid values are 4096-3240000 (inclusive). + Maps to net.core.somaxconn. + format: int32 + maximum: 3240000 + minimum: 4096 + type: integer + netCoreWmemDefault: + description: NetCoreWmemDefault specifies the default + send socket buffer size in bytes. Valid values are + 212992-134217728 (inclusive). Maps to net.core.wmem_default. + format: int32 + maximum: 134217728 + minimum: 212992 + type: integer + netCoreWmemMax: + description: NetCoreWmemMax specifies the maximum + send socket buffer size in bytes. Valid values are + 212992-134217728 (inclusive). Maps to net.core.wmem_max. + format: int32 + maximum: 134217728 + minimum: 212992 + type: integer + netIpv4IPLocalPortRange: + description: NetIpv4IPLocalPortRange is used by TCP + and UDP traffic to choose the local port on the + agent node. PortRange should be specified in the + format "first last". First, being an integer, must + be between [1024 - 60999]. Last, being an integer, + must be between [32768 - 65000]. Maps to net.ipv4.ip_local_port_range. + type: string + netIpv4NeighDefaultGcThresh1: + description: NetIpv4NeighDefaultGcThresh1 specifies + the minimum number of entries that may be in the + ARP cache. Garbage collection won't be triggered + if the number of entries is below this setting. + Valid values are 128-80000 (inclusive). Maps to + net.ipv4.neigh.default.gc_thresh1. + format: int32 + maximum: 80000 + minimum: 128 + type: integer + netIpv4NeighDefaultGcThresh2: + description: NetIpv4NeighDefaultGcThresh2 specifies + soft maximum number of entries that may be in the + ARP cache. ARP garbage collection will be triggered + about 5 seconds after reaching this soft maximum. + Valid values are 512-90000 (inclusive). Maps to + net.ipv4.neigh.default.gc_thresh2. + format: int32 + maximum: 90000 + minimum: 512 + type: integer + netIpv4NeighDefaultGcThresh3: + description: NetIpv4NeighDefaultGcThresh3 specified + hard maximum number of entries in the ARP cache. + Valid values are 1024-100000 (inclusive). Maps to + net.ipv4.neigh.default.gc_thresh3. + format: int32 + maximum: 100000 + minimum: 1024 + type: integer + netIpv4TCPFinTimeout: + description: NetIpv4TCPFinTimeout specifies the length + of time an orphaned connection will remain in the + FIN_WAIT_2 state before it's aborted at the local + end. Valid values are 5-120 (inclusive). Maps to + net.ipv4.tcp_fin_timeout. + format: int32 + maximum: 120 + minimum: 5 + type: integer + netIpv4TCPKeepaliveProbes: + description: NetIpv4TCPKeepaliveProbes specifies the + number of keepalive probes TCP sends out, until + it decides the connection is broken. Valid values + are 1-15 (inclusive). Maps to net.ipv4.tcp_keepalive_probes. + format: int32 + maximum: 15 + minimum: 1 + type: integer + netIpv4TCPKeepaliveTime: + description: NetIpv4TCPKeepaliveTime specifies the + rate at which TCP sends out a keepalive message + when keepalive is enabled. Valid values are 30-432000 + (inclusive). Maps to net.ipv4.tcp_keepalive_time. 
+ format: int32 + maximum: 432000 + minimum: 30 + type: integer + netIpv4TCPMaxSynBacklog: + description: NetIpv4TCPMaxSynBacklog specifies the + maximum number of queued connection requests that + have still not received an acknowledgment from the + connecting client. If this number is exceeded, the + kernel will begin dropping requests. Valid values + are 128-3240000 (inclusive). Maps to net.ipv4.tcp_max_syn_backlog. + format: int32 + maximum: 3240000 + minimum: 128 + type: integer + netIpv4TCPMaxTwBuckets: + description: NetIpv4TCPMaxTwBuckets specifies maximal + number of timewait sockets held by system simultaneously. + If this number is exceeded, time-wait socket is + immediately destroyed and warning is printed. Valid + values are 8000-1440000 (inclusive). Maps to net.ipv4.tcp_max_tw_buckets. + format: int32 + maximum: 1440000 + minimum: 8000 + type: integer + netIpv4TCPTwReuse: + description: NetIpv4TCPTwReuse is used to allow to + reuse TIME-WAIT sockets for new connections when + it's safe from protocol viewpoint. Maps to net.ipv4.tcp_tw_reuse. + type: boolean + netIpv4TCPkeepaliveIntvl: + description: NetIpv4TCPkeepaliveIntvl specifies the + frequency of the probes sent out. Multiplied by + tcpKeepaliveprobes, it makes up the time to kill + a connection that isn't responding, after probes + started. Valid values are 1-75 (inclusive). Maps + to net.ipv4.tcp_keepalive_intvl. + format: int32 + maximum: 75 + minimum: 1 + type: integer + netNetfilterNfConntrackBuckets: + description: NetNetfilterNfConntrackBuckets specifies + the size of hash table used by nf_conntrack module + to record the established connection record of the + TCP protocol. Valid values are 65536-147456 (inclusive). + Maps to net.netfilter.nf_conntrack_buckets. + format: int32 + maximum: 147456 + minimum: 65536 + type: integer + netNetfilterNfConntrackMax: + description: NetNetfilterNfConntrackMax specifies + the maximum number of connections supported by the + nf_conntrack module or the size of connection tracking + table. Valid values are 131072-1048576 (inclusive). + Maps to net.netfilter.nf_conntrack_max. + format: int32 + maximum: 1048576 + minimum: 131072 + type: integer + vmMaxMapCount: + description: VMMaxMapCount specifies the maximum number + of memory map areas a process may have. Maps to + vm.max_map_count. Valid values are 65530-262144 + (inclusive). + format: int32 + maximum: 262144 + minimum: 65530 + type: integer + vmSwappiness: + description: VMSwappiness specifies aggressiveness + of the kernel in swapping memory pages. Higher values + will increase aggressiveness, lower values decrease + the amount of swap. Valid values are 0-100 (inclusive). + Maps to vm.swappiness. + format: int32 + maximum: 100 + minimum: 0 + type: integer + vmVfsCachePressure: + description: VMVfsCachePressure specifies the percentage + value that controls tendency of the kernel to reclaim + the memory, which is used for caching of directory + and inode objects. Valid values are 1-500 (inclusive). + Maps to vm.vfs_cache_pressure. + format: int32 + maximum: 500 + minimum: 1 + type: integer + type: object + transparentHugePageDefrag: + description: "TransparentHugePageDefrag specifies whether + the kernel should make aggressive use of memory compaction + to make more hugepages available. See also [Linux doc]. + \n [Linux doc]: https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html#admin-guide-transhuge + for more details." 
+ enum: + - always + - defer + - defer+madvise + - madvise + - never + type: string + transparentHugePageEnabled: + description: "TransparentHugePageEnabled specifies various + modes of Transparent Hugepages. See also [Linux doc]. + \n [Linux doc]: https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html#admin-guide-transhuge + for more details." + enum: + - always + - madvise + - never + type: string + type: object + maxPods: + description: "MaxPods specifies the kubelet `--max-pods` configuration + for the node pool. Immutable. See also [AKS doc], [K8s doc]. + \n [AKS doc]: https://learn.microsoft.com/azure/aks/configure-azure-cni#configure-maximum---new-clusters + [K8s doc]: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/" + format: int32 + type: integer + mode: + description: 'Mode - represents mode of an agent pool. Possible + values include: System, User.' + enum: + - System + - User + type: string + name: + description: Name - name of the agent pool. If not specified, + CAPZ uses the name of the CR as the agent pool name. Immutable. + type: string + nodeLabels: + additionalProperties: + type: string + description: "Node labels - labels for all of the nodes present + in node pool. See also [AKS doc]. \n [AKS doc]: https://learn.microsoft.com/azure/aks/use-labels" + type: object + nodePublicIPPrefixID: + description: NodePublicIPPrefixID specifies the public IP + prefix resource ID which VM nodes should use IPs from. Immutable. + type: string + osDiskSizeGB: + description: OSDiskSizeGB is the disk size for every machine + in this agent pool. If you specify 0, it will apply the + default osDisk size according to the vmSize specified. Immutable. + format: int32 + type: integer + osDiskType: + default: Managed + description: "OsDiskType specifies the OS disk type for each + node in the pool. Allowed values are 'Ephemeral' and 'Managed' + (default). Immutable. See also [AKS doc]. \n [AKS doc]: + https://learn.microsoft.com/azure/aks/cluster-configuration#ephemeral-os" + enum: + - Ephemeral + - Managed + type: string + osType: + description: "OSType specifies the virtual machine operating + system. Default to Linux. Possible values include: 'Linux', + 'Windows'. 'Windows' requires the AzureManagedControlPlane's + `spec.networkPlugin` to be `azure`. Immutable. See also + [AKS doc]. \n [AKS doc]: https://learn.microsoft.com/rest/api/aks/agent-pools/create-or-update?tabs=HTTP#ostype" + enum: + - Linux + - Windows + type: string + scaleDownMode: + default: Delete + description: 'ScaleDownMode affects the cluster autoscaler + behavior. Default to Delete. Possible values include: ''Deallocate'', + ''Delete''' + enum: + - Deallocate + - Delete + type: string + scaleSetPriority: + description: 'ScaleSetPriority specifies the ScaleSetPriority + value. Default to Regular. Possible values include: ''Regular'', + ''Spot'' Immutable.' + enum: + - Regular + - Spot + type: string + scaling: + description: Scaling specifies the autoscaling parameters + for the node pool. + properties: + maxSize: + description: MaxSize is the maximum number of nodes for + auto-scaling. + format: int32 + type: integer + minSize: + description: MinSize is the minimum number of nodes for + auto-scaling. + format: int32 + type: integer + type: object + sku: + description: SKU is the size of the VMs in the node pool. + Immutable. + type: string + spotMaxPrice: + anyOf: + - type: integer + - type: string + description: SpotMaxPrice defines max price to pay for spot + instance. 
Possible values are any decimal value greater + than zero or -1. If you set the max price to be -1, the + VM won't be evicted based on price. The price for the VM + will be the current price for spot or the price for a standard + VM, which ever is less, as long as there's capacity and + quota available. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + subnetName: + description: SubnetName specifies the Subnet where the MachinePool + will be placed Immutable. + type: string + taints: + description: "Taints specifies the taints for nodes present + in this agent pool. See also [AKS doc]. \n [AKS doc]: https://learn.microsoft.com/azure/aks/use-multiple-node-pools#setting-node-pool-taints" + items: + description: Taint represents a Kubernetes taint. + properties: + effect: + description: Effect specifies the effect for the taint + enum: + - NoSchedule + - NoExecute + - PreferNoSchedule + type: string + key: + description: Key is the key of the taint + type: string + value: + description: Value is the value of the taint + type: string + required: + - effect + - key + - value + type: object + type: array + required: + - mode + - sku + type: object + required: + - spec + type: object + required: + - template + type: object + type: object + served: true + storage: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index f41ea1f58f7..bfb526fd42c 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -15,6 +15,9 @@ resources: - bases/infrastructure.cluster.x-k8s.io_azuremanagedclusters.yaml - bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml - bases/infrastructure.cluster.x-k8s.io_azuremachinepoolmachines.yaml + - bases/infrastructure.cluster.x-k8s.io_azuremanagedclustertemplates.yaml + - bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml + - bases/infrastructure.cluster.x-k8s.io_azuremanagedmachinepooltemplates.yaml # +kubebuilder:scaffold:crdkustomizeresource diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 90a8fe4a8c8..dcd4f04c897 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -114,6 +114,28 @@ webhooks: resources: - azuremanagedcontrolplanes sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplanetemplate + failurePolicy: Fail + matchPolicy: Equivalent + name: default.azuremanagedcontrolplanetemplate.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - azuremanagedcontrolplanetemplates + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -295,6 +317,26 @@ webhooks: resources: - azuremanagedclusters sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedclustertemplate + failurePolicy: Fail + name: validation.azuremanagedclustertemplates.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - UPDATE + resources: + - azuremanagedclustertemplates + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ 
-316,6 +358,28 @@ webhooks:
     resources:
     - azuremanagedcontrolplanes
   sideEffects: None
+- admissionReviewVersions:
+  - v1
+  - v1beta1
+  clientConfig:
+    service:
+      name: webhook-service
+      namespace: system
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplanetemplate
+  failurePolicy: Fail
+  matchPolicy: Equivalent
+  name: validation.azuremanagedcontrolplanetemplate.infrastructure.cluster.x-k8s.io
+  rules:
+  - apiGroups:
+    - infrastructure.cluster.x-k8s.io
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - azuremanagedcontrolplanetemplates
+  sideEffects: None
 - admissionReviewVersions:
   - v1
   - v1beta1
diff --git a/controllers/helpers.go b/controllers/helpers.go
index 9d8b40c98b6..f7d1ce3c23f 100644
--- a/controllers/helpers.go
+++ b/controllers/helpers.go
@@ -945,6 +945,10 @@ func MachinePoolToAzureManagedControlPlaneMapFunc(ctx context.Context, c client.
 		gk := gvk.GroupKind()
 
 		ref := cluster.Spec.ControlPlaneRef
+		if ref == nil || ref.Name == "" {
+			log.Error(err, "control plane ref is nil or empty")
+			return nil
+		}
 		// Return early if the GroupKind doesn't match what we expect.
 		controlPlaneGK := ref.GroupVersionKind().GroupKind()
 		if gk != controlPlaneGK {
diff --git a/docs/book/src/topics/clusterclass.md b/docs/book/src/topics/clusterclass.md
new file mode 100644
index 00000000000..326f82015a1
--- /dev/null
+++ b/docs/book/src/topics/clusterclass.md
@@ -0,0 +1,88 @@
+# ClusterClass
+
+- **Feature status:** GA
+- **Feature gate:** MachinePool=true, ClusterTopology=true
+
+[ClusterClass](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/index.html) is a collection of templates that define a topology (control plane and machine deployments) used to continuously reconcile one or more Clusters. It is a Cluster API feature built on top of the existing Cluster API resources that provides a set of tools and operations to streamline cluster lifecycle management while keeping the same underlying API.
+
+CAPZ currently supports ClusterClass for both managed (AKS) and self-managed clusters. CAPZ implements this with four custom resources:
+1. AzureClusterTemplate
+2. AzureManagedClusterTemplate
+3. AzureManagedControlPlaneTemplate
+4. AzureManagedMachinePoolTemplate
+
+Each resource is a template for the corresponding CAPZ resource. For example, the AzureClusterTemplate is a template for the CAPZ AzureCluster resource. The template contains a set of parameters that can be shared across multiple clusters.
+
+## Deploying a Self-Managed Cluster with ClusterClass
+
+To deploy a self-managed cluster with ClusterClass, you must first create a ClusterClass resource. The ClusterClass resource defines the cluster topology, including the control plane and machine deployment templates, as well as the parameters that can be used to customize that topology.
+
+Please refer to the Cluster API book for more information on how to write a ClusterClass topology: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/write-clusterclass.html
+
+For a self-managed cluster, the AzureClusterTemplate is used to define the Azure infrastructure for the cluster. The following example shows a basic AzureClusterTemplate resource:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureClusterTemplate
+metadata:
+  name: capz-clusterclass-cluster
+  namespace: default
+spec:
+  template:
+    spec:
+      location: westus2
+      networkSpec:
+        subnets:
+        - name: control-plane-subnet
+          role: control-plane
+        - name: node-subnet
+          natGateway:
+            name: node-natgateway
+          role: node
+      subscriptionID: 00000000-0000-0000-0000-000000000000
+```
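+
+A Cluster then consumes the ClusterClass through its `spec.topology` field. The snippet below is a minimal sketch mirroring the `clusterclass-cluster` flavor added in this change; the class name `capz-clusterclass` and the worker class `capz-clusterclass-worker` are illustrative placeholders and must match the names defined in your ClusterClass:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: capz-clusterclass-cluster
+  namespace: default
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+      - 192.168.0.0/16
+  topology:
+    # References the ClusterClass by name; it must exist in the same namespace.
+    class: capz-clusterclass
+    version: v1.25.2
+    controlPlane:
+      replicas: 3
+    workers:
+      machineDeployments:
+      # The class must match a machineDeployment class declared in the ClusterClass.
+      - class: capz-clusterclass-worker
+        name: md-0
+        replicas: 2
+```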
+
+## Deploying a Managed Cluster (AKS) with ClusterClass
+
+Deploying an AKS cluster with ClusterClass is similar to deploying a self-managed cluster. Instead of the AzureClusterTemplate, you use an AzureManagedClusterTemplate, an AzureManagedControlPlaneTemplate, and one or more AzureManagedMachinePoolTemplates. Due to the nature of managed Kubernetes and the control plane implementation, the infrastructure provider (and therefore the AzureManagedClusterTemplate) for an AKS cluster is essentially a no-op. The AzureManagedControlPlaneTemplate defines the AKS control plane configuration, such as the Kubernetes version and location, while node pools are defined by the AzureManagedMachinePoolTemplates referenced from the ClusterClass workers.
+
+The following example shows basic AzureManagedClusterTemplate and AzureManagedControlPlaneTemplate resources:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureManagedClusterTemplate
+metadata:
+  name: capz-clusterclass-cluster
+spec:
+  template:
+    spec: {}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureManagedControlPlaneTemplate
+metadata:
+  name: capz-clusterclass-control-plane
+spec:
+  template:
+    spec:
+      location: westus2
+      subscriptionID: 00000000-0000-0000-0000-000000000000
+      version: 1.25.2
+```
+
+## Excluded Fields
+
+Since a ClusterClass template is shared across multiple clusters, fields that must be unique to a single cluster cannot be set in the template resources. For each of the template resources, the following fields are excluded:
+
+### AzureClusterTemplate
+- `spec.resourceGroup`
+- `spec.controlPlaneEndpoint`
+- `spec.bastionSpec.azureBastion.name`
+- `spec.bastionSpec.azureBastion.subnetSpec.routeTable`
+- `spec.bastionSpec.azureBastion.publicIP`
+- `spec.bastionSpec.azureBastion.sku`
+- `spec.bastionSpec.azureBastion.enableTunneling`
+
+### AzureManagedControlPlaneTemplate
+
+- `spec.resourceGroupName`
+- `spec.nodeResourceGroupName`
+- `spec.virtualNetwork.name`
+- `spec.virtualNetwork.subnet`
+- `spec.virtualNetwork.resourceGroup`
+- `spec.controlPlaneEndpoint`
+- `spec.sshPublicKey`
+- `spec.apiServerAccessProfile.authorizedIPRanges`
diff --git a/main.go b/main.go
index 3f79e50bad6..46fbd6f67a5 100644
--- a/main.go
+++ b/main.go
@@ -519,6 +519,11 @@ func registerWebhooks(mgr manager.Manager) {
 		os.Exit(1)
 	}
 
+	if err := (&infrav1.AzureManagedClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedClusterTemplate")
+		os.Exit(1)
+	}
+
 	if err := infrav1exp.SetupAzureMachinePoolWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool")
 		os.Exit(1)
@@ -539,6 +544,11 @@ func registerWebhooks(mgr manager.Manager) {
 		os.Exit(1)
 	}
 
+	if err := infrav1.SetupAzureManagedControlPlaneTemplateWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlaneTemplate")
+		os.Exit(1)
+	}
+
 	if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
 		setupLog.Error(err, "unable to create ready check")
 		os.Exit(1)
diff --git a/templates/cluster-template-aks-clusterclass-cluster.yaml 
b/templates/cluster-template-aks-clusterclass-cluster.yaml new file mode 100644 index 00000000000..c78efb4b389 --- /dev/null +++ b/templates/cluster-template-aks-clusterclass-cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + topology: + class: ${CLUSTER_CLASS_NAME} + version: ${KUBERNETES_VERSION} + workers: + machinePools: + - class: default-system + name: mp-0 + replicas: 1 + - class: default-worker + name: mp-1 + replicas: 1 diff --git a/templates/cluster-template-aks-clusterclass.yaml b/templates/cluster-template-aks-clusterclass.yaml new file mode 100644 index 00000000000..7c91181f416 --- /dev/null +++ b/templates/cluster-template-aks-clusterclass.yaml @@ -0,0 +1,155 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: default +spec: + controlPlane: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedControlPlaneTemplate + name: ${CLUSTER_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedClusterTemplate + name: ${CLUSTER_NAME} + workers: + machinePools: + - class: default-system + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool0 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool0 + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool1 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool1 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedClusterTemplate +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + template: + spec: {} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: ${CLUSTER_NAME}-pool0 + namespace: default +spec: + template: + spec: + mode: System + name: pool0 + sku: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: ${CLUSTER_NAME}-pool1 + namespace: default +spec: + template: + spec: + mode: User + name: pool1 + sku: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID} + clientSecret: + name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} + namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} + tenantID: ${AZURE_TENANT_ID} + type: ServicePrincipal +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: 
KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-pool0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-pool1 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] diff --git a/templates/cluster-template-clusterclass-cluster.yaml b/templates/cluster-template-clusterclass-cluster.yaml new file mode 100644 index 00000000000..28ad70f5a82 --- /dev/null +++ b/templates/cluster-template-clusterclass-cluster.yaml @@ -0,0 +1,23 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + containerd-logger: enabled + csi-proxy: enabled + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + topology: + class: ${CLUSTER_CLASS_NAME} + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: ${KUBERNETES_VERSION} + workers: + machineDeployments: + - class: ${CLUSTER_NAME}-worker + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} diff --git a/templates/cluster-template-clusterclass.yaml b/templates/cluster-template-clusterclass.yaml index 7c9c39da4a2..c68df586f34 100644 --- a/templates/cluster-template-clusterclass.yaml +++ b/templates/cluster-template-clusterclass.yaml @@ -79,30 +79,6 @@ spec: kind: AzureMachineTemplate name: ${CLUSTER_NAME}-md-0 --- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - labels: - containerd-logger: enabled - csi-proxy: enabled - name: ${CLUSTER_NAME} - namespace: default -spec: - clusterNetwork: - pods: - cidrBlocks: - - 192.168.0.0/16 - topology: - class: ${CLUSTER_CLASS_NAME} - controlPlane: - replicas: ${CONTROL_PLANE_MACHINE_COUNT} - version: ${KUBERNETES_VERSION} - workers: - machineDeployments: - - class: ${CLUSTER_NAME}-worker - name: md-0 - replicas: ${WORKER_MACHINE_COUNT} ---- apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: AzureClusterTemplate metadata: diff --git a/templates/flavors/aks-clusterclass-cluster/cluster.yaml b/templates/flavors/aks-clusterclass-cluster/cluster.yaml new file mode 100644 index 00000000000..c78efb4b389 --- /dev/null +++ b/templates/flavors/aks-clusterclass-cluster/cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + topology: + class: ${CLUSTER_CLASS_NAME} + version: ${KUBERNETES_VERSION} + workers: + machinePools: + - class: default-system + name: mp-0 + replicas: 1 + - class: default-worker + name: mp-1 + replicas: 1 diff --git a/templates/flavors/aks-clusterclass-cluster/kustomization.yaml b/templates/flavors/aks-clusterclass-cluster/kustomization.yaml new file mode 100644 index 00000000000..7a5648beb9c 
--- /dev/null +++ b/templates/flavors/aks-clusterclass-cluster/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- cluster.yaml diff --git a/templates/flavors/aks-clusterclass/azure-managed-cluster-template.yaml b/templates/flavors/aks-clusterclass/azure-managed-cluster-template.yaml new file mode 100644 index 00000000000..6ad8daa397e --- /dev/null +++ b/templates/flavors/aks-clusterclass/azure-managed-cluster-template.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedClusterTemplate +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + template: + spec: {} + \ No newline at end of file diff --git a/templates/flavors/aks-clusterclass/azure-managed-controlplane-template.yaml b/templates/flavors/aks-clusterclass/azure-managed-controlplane-template.yaml new file mode 100644 index 00000000000..12e31a5c904 --- /dev/null +++ b/templates/flavors/aks-clusterclass/azure-managed-controlplane-template.yaml @@ -0,0 +1,15 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} + version: ${KUBERNETES_VERSION} diff --git a/templates/flavors/aks-clusterclass/azure-managed-machinepool-template.yaml b/templates/flavors/aks-clusterclass/azure-managed-machinepool-template.yaml new file mode 100644 index 00000000000..0c637c91507 --- /dev/null +++ b/templates/flavors/aks-clusterclass/azure-managed-machinepool-template.yaml @@ -0,0 +1,23 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: ${CLUSTER_NAME}-pool0 + namespace: default +spec: + template: + spec: + mode: System + name: pool0 + sku: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: ${CLUSTER_NAME}-pool1 + namespace: default +spec: + template: + spec: + mode: User + name: pool1 + sku: ${AZURE_NODE_MACHINE_TYPE} \ No newline at end of file diff --git a/templates/flavors/aks-clusterclass/clusterclass.yaml b/templates/flavors/aks-clusterclass/clusterclass.yaml new file mode 100644 index 00000000000..f20813ac9c3 --- /dev/null +++ b/templates/flavors/aks-clusterclass/clusterclass.yaml @@ -0,0 +1,43 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: default +spec: + controlPlane: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedControlPlaneTemplate + name: ${CLUSTER_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedClusterTemplate + name: ${CLUSTER_NAME} + workers: + machinePools: + - class: default-system + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool0 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool0 + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool1 + infrastructure: + 
ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool1 + \ No newline at end of file diff --git a/templates/flavors/aks-clusterclass/kubeadm-config-template.yaml b/templates/flavors/aks-clusterclass/kubeadm-config-template.yaml new file mode 100644 index 00000000000..8b36253b628 --- /dev/null +++ b/templates/flavors/aks-clusterclass/kubeadm-config-template.yaml @@ -0,0 +1,47 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-pool0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-pool1 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] diff --git a/templates/flavors/aks-clusterclass/kustomization.yaml b/templates/flavors/aks-clusterclass/kustomization.yaml new file mode 100644 index 00000000000..98083341a84 --- /dev/null +++ b/templates/flavors/aks-clusterclass/kustomization.yaml @@ -0,0 +1,10 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- clusterclass.yaml +- azure-managed-controlplane-template.yaml +- azure-managed-cluster-template.yaml +- azure-managed-machinepool-template.yaml +- ../../azure-cluster-identity +- kubeadm-config-template.yaml diff --git a/templates/flavors/aks-clusterclass/patches/managedazurecluster-identity-ref.yaml b/templates/flavors/aks-clusterclass/patches/managedazurecluster-identity-ref.yaml new file mode 100644 index 00000000000..37d7ac29191 --- /dev/null +++ b/templates/flavors/aks-clusterclass/patches/managedazurecluster-identity-ref.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: "${CLUSTER_IDENTITY_NAME}" \ No newline at end of file diff --git a/templates/flavors/clusterclass/cluster.yaml b/templates/flavors/clusterclass-cluster/cluster.yaml similarity index 100% rename from templates/flavors/clusterclass/cluster.yaml rename to templates/flavors/clusterclass-cluster/cluster.yaml diff --git a/templates/flavors/clusterclass-cluster/kustomization.yaml b/templates/flavors/clusterclass-cluster/kustomization.yaml new file mode 100644 index 00000000000..fd7811e1145 --- /dev/null +++ b/templates/flavors/clusterclass-cluster/kustomization.yaml @@ -0,0 +1,3 @@ +namespace: default +resources: +- cluster.yaml \ No newline at end of file diff --git a/templates/flavors/clusterclass/kustomization.yaml b/templates/flavors/clusterclass/kustomization.yaml index d32392fc71f..30b4276d555 100644 --- 
a/templates/flavors/clusterclass/kustomization.yaml +++ b/templates/flavors/clusterclass/kustomization.yaml @@ -1,7 +1,6 @@ namespace: default resources: - clusterclass.yaml - - cluster.yaml - azure-cluster-template.yaml - azure-machine-template-controlplane.yaml - azure-machine-template-worker.yaml diff --git a/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml b/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml new file mode 100644 index 00000000000..34da32fe61a --- /dev/null +++ b/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml @@ -0,0 +1,273 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} + namespace: default +spec: + controlPlane: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedControlPlaneTemplate + name: ${CLUSTER_NAME}-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedClusterTemplate + name: ${CLUSTER_NAME} + patches: + - definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/files + valueFrom: + template: | + - contentFrom: + secret: + key: worker-node-azure.json + name: "{{ .builtin.machinePool.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machinePoolClass: + names: + - default-system + - default-worker + name: workerAzureJsonSecretName + workers: + machinePools: + - class: default-system + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool0 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool0 + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool1 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool1 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + addonProfiles: + - enabled: true + name: azurepolicy + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedClusterTemplate +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + template: + spec: {} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: ${CLUSTER_NAME}-pool0 + namespace: default +spec: + template: + spec: + availabilityZones: + - "1" + - "2" + enableNodePublicIP: false + enableUltraSSD: true + maxPods: 30 + mode: System + name: pool0 + osDiskSizeGB: 30 + osDiskType: Managed + sku: ${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: ${CLUSTER_NAME}-pool1 + namespace: 
default +spec: + template: + spec: + enableNodePublicIP: false + kubeletConfig: + allowedUnsafeSysctls: + - net.* + - kernel.msg* + containerLogMaxFiles: 50 + containerLogMaxSizeMB: 500 + cpuCfsQuota: true + cpuCfsQuotaPeriod: 110ms + cpuManagerPolicy: static + failSwapOn: false + imageGcHighThreshold: 70 + imageGcLowThreshold: 50 + podMaxPids: 2048 + topologyManagerPolicy: best-effort + linuxOSConfig: + swapFileSizeMB: 1500 + sysctls: + fsAioMaxNr: 65536 + fsFileMax: 709620 + fsInotifyMaxUserWatches: 1048576 + fsNrOpen: 1048576 + kernelThreadsMax: 55601 + netCoreNetdevMaxBacklog: 1000 + netCoreOptmemMax: 20480 + netCoreRmemDefault: 212992 + netCoreRmemMax: 212992 + netCoreSomaxconn: 16384 + netCoreWmemDefault: 212992 + netCoreWmemMax: 212992 + netIpv4IPLocalPortRange: 32768 60999 + netIpv4NeighDefaultGcThresh1: 4096 + netIpv4NeighDefaultGcThresh2: 8192 + netIpv4NeighDefaultGcThresh3: 16384 + netIpv4TCPFinTimeout: 60 + netIpv4TCPKeepaliveProbes: 9 + netIpv4TCPKeepaliveTime: 7200 + netIpv4TCPMaxSynBacklog: 16384 + netIpv4TCPMaxTwBuckets: 32768 + netIpv4TCPTwReuse: false + netIpv4TCPkeepaliveIntvl: 75 + netNetfilterNfConntrackBuckets: 65536 + netNetfilterNfConntrackMax: 131072 + vmMaxMapCount: 65530 + vmSwappiness: 60 + vmVfsCachePressure: 100 + transparentHugePageDefrag: madvise + transparentHugePageEnabled: always + maxPods: 64 + mode: User + name: pool1 + nodeLabels: + type: shared + osDiskSizeGB: 40 + osDiskType: Ephemeral + scaleSetPriority: Regular + sku: ${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3} + taints: + - effect: NoSchedule + key: type + value: shared +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID} + clientSecret: + name: ${AZURE_CLUSTER_IDENTITY_SECRET_NAME} + namespace: ${AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE} + tenantID: ${AZURE_TENANT_ID} + type: ServicePrincipal +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-pool0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-pool1 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: replace_me + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + azure-container-registry-config: /etc/kubernetes/azure.json + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: [] +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + topology: + class: default + version: ${KUBERNETES_VERSION} + workers: + machinePools: + - class: default-system + name: mp-0 + replicas: 1 + - class: default-worker + name: mp-1 + replicas: 1 diff --git 
a/templates/test/ci/prow-aks-clusterclass/kustomization.yaml b/templates/test/ci/prow-aks-clusterclass/kustomization.yaml new file mode 100644 index 00000000000..df8f28bf9cb --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: + - ../../../flavors/aks-clusterclass + - ../../../flavors/aks-clusterclass-cluster +patchesStrategicMerge: + - patches/tags-aks-clusterclass.yaml + - patches/aks-clusterclass-pool0.yaml + - patches/aks-clusterclass-pool1.yaml + - patches/cluster.yaml + - patches/addons.yaml + - patches.yaml diff --git a/templates/test/ci/prow-aks-clusterclass/patches.yaml b/templates/test/ci/prow-aks-clusterclass/patches.yaml new file mode 100644 index 00000000000..74d7e896aa7 --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/patches.yaml @@ -0,0 +1,54 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: ${CLUSTER_CLASS_NAME} +spec: + workers: + machinePools: + - class: default-system + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool0 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool0 + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-pool1 + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureManagedMachinePoolTemplate + name: ${CLUSTER_NAME}-pool1 + patches: + - name: workerAzureJsonSecretName + definitions: + - selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machinePoolClass: + names: + - default-system + - default-worker + jsonPatches: + - op: replace + path: "/spec/template/spec/files" + valueFrom: + template: | + - contentFrom: + secret: + key: worker-node-azure.json + name: "{{ .builtin.machinePool.infrastructureRef.name }}-azure-json" + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" diff --git a/templates/test/ci/prow-aks-clusterclass/patches/addons.yaml b/templates/test/ci/prow-aks-clusterclass/patches/addons.yaml new file mode 100644 index 00000000000..51d2e93ca8a --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/patches/addons.yaml @@ -0,0 +1,11 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + addonProfiles: + - enabled: true + name: azurepolicy diff --git a/templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool0.yaml b/templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool0.yaml new file mode 100644 index 00000000000..0c7b26b42bf --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool0.yaml @@ -0,0 +1,15 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: "${CLUSTER_NAME}-pool0" +spec: + template: + spec: + maxPods: 30 + osDiskType: "Managed" + osDiskSizeGB: 30 + enableNodePublicIP: false + enableUltraSSD: true + availabilityZones: ["1", "2"] + name: pool0 + sku: "${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}" diff --git a/templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool1.yaml 
b/templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool1.yaml new file mode 100644 index 00000000000..f21c89c324a --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/patches/aks-clusterclass-pool1.yaml @@ -0,0 +1,68 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePoolTemplate +metadata: + name: "${CLUSTER_NAME}-pool1" +spec: + template: + spec: + maxPods: 64 + osDiskType: "Ephemeral" + osDiskSizeGB: 40 + enableNodePublicIP: false + scaleSetPriority: Regular + taints: + - effect: NoSchedule + key: type + value: shared + nodeLabels: + "type": "shared" + name: pool1 + sku: "${AZURE_AKS_NODE_MACHINE_TYPE:=Standard_D2s_v3}" + kubeletConfig: + cpuManagerPolicy: "static" + cpuCfsQuota: true + cpuCfsQuotaPeriod: "110ms" + imageGcHighThreshold: 70 + imageGcLowThreshold: 50 + topologyManagerPolicy: "best-effort" + allowedUnsafeSysctls: + - "net.*" + - "kernel.msg*" + failSwapOn: false + containerLogMaxSizeMB: 500 + containerLogMaxFiles: 50 + podMaxPids: 2048 + linuxOSConfig: + swapFileSizeMB: 1500 + sysctls: + fsAioMaxNr: 65536 + fsFileMax: 709620 + fsInotifyMaxUserWatches: 1048576 + fsNrOpen: 1048576 + kernelThreadsMax: 55601 + netCoreNetdevMaxBacklog: 1000 + netCoreOptmemMax: 20480 + netCoreRmemDefault: 212992 + netCoreRmemMax: 212992 + netCoreSomaxconn: 16384 + netCoreWmemDefault: 212992 + netCoreWmemMax: 212992 + netIpv4IPLocalPortRange: "32768 60999" + netIpv4NeighDefaultGcThresh1: 4096 + netIpv4NeighDefaultGcThresh2: 8192 + netIpv4NeighDefaultGcThresh3: 16384 + netIpv4TCPFinTimeout: 60 + netIpv4TCPKeepaliveProbes: 9 + netIpv4TCPKeepaliveTime: 7200 + netIpv4TCPMaxSynBacklog: 16384 + netIpv4TCPMaxTwBuckets: 32768 + netIpv4TCPTwReuse: false + netIpv4TCPkeepaliveIntvl: 75 + netNetfilterNfConntrackBuckets: 65536 + netNetfilterNfConntrackMax: 131072 + vmMaxMapCount: 65530 + vmSwappiness: 60 + vmVfsCachePressure: 100 + transparentHugePageDefrag: "madvise" + transparentHugePageEnabled: "always" +--- \ No newline at end of file diff --git a/templates/test/ci/prow-aks-clusterclass/patches/cluster.yaml b/templates/test/ci/prow-aks-clusterclass/patches/cluster.yaml new file mode 100644 index 00000000000..a1a4b8f8e39 --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/patches/cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} +spec: + topology: + class: default diff --git a/templates/test/ci/prow-aks-clusterclass/patches/tags-aks-clusterclass.yaml b/templates/test/ci/prow-aks-clusterclass/patches/tags-aks-clusterclass.yaml new file mode 100644 index 00000000000..9a6f76edb6f --- /dev/null +++ b/templates/test/ci/prow-aks-clusterclass/patches/tags-aks-clusterclass.yaml @@ -0,0 +1,12 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlaneTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + additionalTags: + jobName: ${JOB_NAME} + creationTimestamp: ${TIMESTAMP} + buildProvenance: ${BUILD_PROVENANCE} \ No newline at end of file diff --git a/templates/test/ci/prow-topology/kustomization.yaml b/templates/test/ci/prow-topology/kustomization.yaml index 60a8aa47928..77f435eada9 100644 --- a/templates/test/ci/prow-topology/kustomization.yaml +++ b/templates/test/ci/prow-topology/kustomization.yaml @@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: default resources: - - ../../../flavors/clusterclass/cluster.yaml + - 
../../../flavors/clusterclass-cluster/cluster.yaml - cni-resource-set.yaml - ../../../addons/windows/csi-proxy/csi-proxy-resource-set.yaml - ../../../addons/cluster-api-helm/calico.yaml diff --git a/test/e2e/aks_autoscaler.go b/test/e2e/aks_autoscaler.go index 0a5a887209f..be3653f0d8b 100644 --- a/test/e2e/aks_autoscaler.go +++ b/test/e2e/aks_autoscaler.go @@ -36,9 +36,10 @@ import ( ) type AKSAutoscaleSpecInput struct { - Cluster *clusterv1.Cluster - MachinePool *expv1.MachinePool - WaitIntervals []interface{} + Cluster *clusterv1.Cluster + MachinePool *expv1.MachinePool + WaitIntervals []interface{} + isClusterClass bool } func AKSAutoscaleSpec(ctx context.Context, inputGetter func() AKSAutoscaleSpecInput) { @@ -62,7 +63,14 @@ func AKSAutoscaleSpec(ctx context.Context, inputGetter func() AKSAutoscaleSpecIn Expect(err).NotTo(HaveOccurred()) ammp := &infrav1.AzureManagedMachinePool{} - err = mgmtClient.Get(ctx, client.ObjectKeyFromObject(input.MachinePool), ammp) + if input.isClusterClass { + err = bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{ + Namespace: input.MachinePool.Spec.Template.Spec.InfrastructureRef.Namespace, + Name: input.MachinePool.Spec.Template.Spec.InfrastructureRef.Name, + }, ammp) + } else { + err = mgmtClient.Get(ctx, client.ObjectKeyFromObject(input.MachinePool), ammp) + } Expect(err).NotTo(HaveOccurred()) resourceGroupName := amcp.Spec.ResourceGroupName diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index 1625f26b851..9dfd95f1bfe 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -844,6 +844,135 @@ var _ = Describe("Workload cluster creation", func() { }) }) + Context("Creating an AKS cluster using ClusterClass [Managed Kubernetes]", func() { + It("with a single control plane node, one linux worker node, and one windows worker node", func() { + // use default as the clusterclass name so test infra can find the clusterclass template + os.Setenv("CLUSTER_CLASS_NAME", "default") + + // use "cc" as spec name because natgw pip name exceeds limit. 
+ clusterName = getClusterName(clusterNamePrefix, "cc") + kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom) + Expect(err).To(BeNil()) + kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion) + Expect(err).To(BeNil()) + + // Opt into using windows with prow template + Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "1")).To(Succeed()) + + // Create a cluster using the cluster class created above + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( + specName, + withFlavor("aks-clusterclass"), + withNamespace(namespace.Name), + withClusterName(clusterName), + withKubernetesVersion(kubernetesVersionUpgradeFrom), + withControlPlaneMachineCount(1), + withWorkerMachineCount(1), + withMachineDeploymentInterval(specName, ""), + withMachinePoolInterval(specName, "wait-worker-nodes"), + withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ + WaitForControlPlaneInitialized: WaitForAKSControlPlaneInitialized, + WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady, + }), + ), result) + + By("Upgrading the Kubernetes version of the cluster", func() { + AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput { + return AKSUpgradeSpecInput{ + Cluster: result.Cluster, + MachinePools: result.MachinePools, + KubernetesVersionUpgradeTo: kubernetesVersion, + WaitForControlPlane: e2eConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"), + } + }) + }) + + By("Exercising machine pools", func() { + AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput { + return AKSMachinePoolSpecInput{ + Cluster: result.Cluster, + MachinePools: result.MachinePools, + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + } + }) + }) + + By("creating a machine pool with public IP addresses from a prefix", func() { + // This test is also currently serving as the canonical + // "create/delete node pool" test. Eventually, that should be + // made more distinct from this public IP prefix test. 
+ AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput { + return AKSPublicIPPrefixSpecInput{ + Cluster: result.Cluster, + KubernetesVersion: kubernetesVersion, + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), + } + }) + }) + + By("creating a machine pool with spot max price and scale down mode", func() { + AKSSpotSpec(ctx, func() AKSSpotSpecInput { + return AKSSpotSpecInput{ + Cluster: result.Cluster, + KubernetesVersion: kubernetesVersion, + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), + } + }) + }) + + By("modifying nodepool autoscaling configuration", func() { + AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput { + return AKSAutoscaleSpecInput{ + Cluster: result.Cluster, + MachinePool: result.MachinePools[0], + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + isClusterClass: true, + } + }) + }) + + By("modifying additionalTags configuration", func() { + AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput { + return AKSAdditionalTagsSpecInput{ + Cluster: result.Cluster, + MachinePools: result.MachinePools, + WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + } + }) + }) + + By("modifying the azure cluster-autoscaler settings", func() { + AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput { + return AKSAzureClusterAutoscalerSettingsSpecInput{ + Cluster: result.Cluster, + WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), + } + }) + }) + + By("modifying node labels configuration", func() { + AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput { + return AKSNodeLabelsSpecInput{ + Cluster: result.Cluster, + MachinePools: result.MachinePools, + WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + } + }) + }) + + By("modifying taints configuration", func() { + AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput { + return AKSNodeTaintsSpecInput{ + Cluster: result.Cluster, + MachinePools: result.MachinePools, + WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + } + }) + }) + }) + }) + // ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`. // This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci" // resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables. 
diff --git a/test/e2e/common.go b/test/e2e/common.go index 3556c62f3b3..b690f6926e1 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -39,6 +39,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/utils/ptr" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -291,7 +292,7 @@ func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCu }, input.WaitForControlPlaneIntervals...).Should(Succeed(), "API Server was not reachable in time") _, hasWindows := cluster.Labels["cni-windows"] - if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != "azure" { + if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != infrav1.CloudProviderName { // There is a co-dependency between cloud-provider and CNI so we install both together if cloud-provider is external. InstallCNIAndCloudProviderAzureHelmChart(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows) } else { diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index 6a6fa1ec4b3..acb3dffa831 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -3,11 +3,11 @@ managementClusterName: capz-e2e images: - name: ${MANAGER_IMAGE} loadBehavior: mustLoad - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.2 + - name: gcr.io/k8s-staging-cluster-api/cluster-api-controller:nightly_main_20230929 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.2 + - name: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller:nightly_main_20230929 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.2 + - name: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller:nightly_main_20230929 loadBehavior: tryLoad - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.0-alpha.9 loadBehavior: tryLoad @@ -26,7 +26,7 @@ providers: files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - name: v1.5.2 - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/core-components.yaml + value: "https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/core-components.yaml" type: url contract: v1beta1 files: @@ -49,7 +49,7 @@ providers: files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - name: v1.5.2 - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/bootstrap-components.yaml + value: https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/bootstrap-components.yaml type: url contract: v1beta1 files: @@ -71,7 +71,7 @@ providers: files: - sourcePath: "../data/shared/v1beta1/metadata.yaml" - name: v1.5.2 - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.2/control-plane-components.yaml + value: https://storage.googleapis.com/artifacts.k8s-staging-cluster-api.appspot.com/components/nightly_main_20230929/control-plane-components.yaml type: url contract: v1beta1 files: @@ -132,12 +132,16 @@ providers: targetName: "cluster-template-workload-identity.yaml" - sourcePath: 
"${PWD}/templates/test/ci/cluster-template-prow-aks.yaml" targetName: "cluster-template-aks.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml" + targetName: "cluster-template-aks-clusterclass.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-custom-vnet.yaml" targetName: "cluster-template-custom-vnet.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-dual-stack.yaml" targetName: "cluster-template-dual-stack.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-clusterclass-ci-default.yaml" targetName: "clusterclass-ci-default.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks-clusterclass.yaml" + targetName: "clusterclass-default.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-topology.yaml" targetName: "cluster-template-topology.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-flatcar.yaml" From 3338a43de6067ed36ee4e3ddf646e39785571124 Mon Sep 17 00:00:00 2001 From: willie-yao Date: Wed, 18 Oct 2023 20:22:00 +0000 Subject: [PATCH 2/5] Add webhook for ammpt --- .../azuremanagedmachinepool_webhook.go | 201 ++++++------ .../azuremanagedmachinepool_webhook_test.go | 2 +- ...azuremanagedmachinepooltemplate_webhook.go | 304 ++++++++++++++++++ ...aml => cluster-template-aks-topology.yaml} | 0 templates/flavors/aks-topology/cluster.yaml | 21 ++ .../kustomization.yaml | 0 .../prow-aks-clusterclass/kustomization.yaml | 2 +- 7 files changed, 438 insertions(+), 92 deletions(-) create mode 100644 api/v1beta1/azuremanagedmachinepooltemplate_webhook.go rename templates/{flavors/aks-clusterclass-cluster/cluster.yaml => cluster-template-aks-topology.yaml} (100%) create mode 100644 templates/flavors/aks-topology/cluster.yaml rename templates/flavors/{aks-clusterclass-cluster => aks-topology}/kustomization.yaml (100%) diff --git a/api/v1beta1/azuremanagedmachinepool_webhook.go b/api/v1beta1/azuremanagedmachinepool_webhook.go index d5a0f7110f0..9bf2e1a350d 100644 --- a/api/v1beta1/azuremanagedmachinepool_webhook.go +++ b/api/v1beta1/azuremanagedmachinepool_webhook.go @@ -105,24 +105,45 @@ func (mw *azureManagedMachinePoolWebhook) ValidateCreate(ctx context.Context, ob "can be set only if the Cluster API 'MachinePool' feature flag is enabled", ) } - validators := []func() error{ - m.validateMaxPods, - m.validateOSType, - m.validateName, - m.validateNodeLabels, - m.validateNodePublicIPPrefixID, - m.validateEnableNodePublicIP, - m.validateKubeletConfig, - m.validateLinuxOSConfig, - m.validateSubnetName, - } var errs []error - for _, validator := range validators { - if err := validator(); err != nil { - errs = append(errs, err) - } - } + + errs = append(errs, validateMaxPods( + m.Spec.MaxPods, + field.NewPath("Spec", "MaxPods"))) + + errs = append(errs, validateOSType( + m.Spec.Mode, + m.Spec.OSType, + field.NewPath("Spec", "OSType"))) + + errs = append(errs, validateMPName( + m.Name, + m.Spec.OSType, + m.Spec.Name, + field.NewPath("Spec", "Name"))) + + errs = append(errs, validateNodeLabels( + m.Spec.NodeLabels, + field.NewPath("Spec", "NodeLabels"))) + + errs = append(errs, validateNodePublicIPPrefixID( + m.Spec.NodePublicIPPrefixID, + field.NewPath("Spec", "NodePublicIPPrefixID"))) + + errs = append(errs, validateEnableNodePublicIP( + m.Spec.EnableNodePublicIP, + m.Spec.NodePublicIPPrefixID, + field.NewPath("Spec", "EnableNodePublicIP"))) + + errs = append(errs, validateKubeletConfig( + m.Spec.KubeletConfig, + field.NewPath("Spec", "KubeletConfig"))) + + 
errs = append(errs, validateLinuxOSConfig( + m.Spec.LinuxOSConfig, + m.Spec.KubeletConfig, + field.NewPath("Spec", "LinuxOSConfig"))) return nil, kerrors.NewAggregate(errs) } @@ -146,7 +167,7 @@ func (mw *azureManagedMachinePoolWebhook) ValidateUpdate(ctx context.Context, ol allErrs = append(allErrs, err) } - if err := m.validateNodeLabels(); err != nil { + if err := validateNodeLabels(m.Spec.NodeLabels, field.NewPath("Spec", "NodeLabels")); err != nil { allErrs = append(allErrs, field.Invalid( field.NewPath("Spec", "NodeLabels"), @@ -210,7 +231,7 @@ func (mw *azureManagedMachinePoolWebhook) ValidateUpdate(ctx context.Context, ol if m.Spec.Mode != string(NodePoolModeSystem) && old.Spec.Mode == string(NodePoolModeSystem) { // validate for last system node pool - if err := m.validateLastSystemNodePool(mw.Client); err != nil { + if err := validateLastSystemNodePool(mw.Client, m.Labels, m.Namespace); err != nil { allErrs = append(allErrs, field.Forbidden( field.NewPath("Spec", "Mode"), "Cannot change node pool mode to User, you must have at least one System node pool in your cluster")) @@ -295,23 +316,23 @@ func (mw *azureManagedMachinePoolWebhook) ValidateDelete(ctx context.Context, ob return nil, nil } - return nil, errors.Wrapf(m.validateLastSystemNodePool(mw.Client), "if the delete is triggered via owner MachinePool please refer to trouble shooting section in https://capz.sigs.k8s.io/topics/managedcluster.html") + return nil, errors.Wrapf(validateLastSystemNodePool(mw.Client, m.Labels, m.Namespace), "if the delete is triggered via owner MachinePool please refer to trouble shooting section in https://capz.sigs.k8s.io/topics/managedcluster.html") } // validateLastSystemNodePool is used to check if the existing system node pool is the last system node pool. // If it is a last system node pool it cannot be deleted or mutated to user node pool as AKS expects min 1 system node pool. -func (m *AzureManagedMachinePool) validateLastSystemNodePool(cli client.Client) error { +func validateLastSystemNodePool(cli client.Client, labels map[string]string, namespace string) error { ctx := context.Background() // Fetch the Cluster. 
- clusterName, ok := m.Labels[clusterv1.ClusterNameLabel] + clusterName, ok := labels[clusterv1.ClusterNameLabel] if !ok { return nil } ownerCluster := &clusterv1.Cluster{} key := client.ObjectKey{ - Namespace: m.Namespace, + Namespace: namespace, Name: clusterName, } @@ -327,7 +348,7 @@ func (m *AzureManagedMachinePool) validateLastSystemNodePool(cli client.Client) return nil } - opt1 := client.InNamespace(m.Namespace) + opt1 := client.InNamespace(namespace) opt2 := client.MatchingLabels(map[string]string{ clusterv1.ClusterNameLabel: clusterName, LabelAgentPoolMode: string(NodePoolModeSystem), @@ -344,12 +365,12 @@ func (m *AzureManagedMachinePool) validateLastSystemNodePool(cli client.Client) return nil } -func (m *AzureManagedMachinePool) validateMaxPods() error { - if m.Spec.MaxPods != nil { - if ptr.Deref[int32](m.Spec.MaxPods, 0) < 10 || ptr.Deref[int32](m.Spec.MaxPods, 0) > 250 { +func validateMaxPods(maxPods *int32, fldPath *field.Path) error { + if maxPods != nil { + if ptr.Deref[int32](maxPods, 0) < 10 || ptr.Deref[int32](maxPods, 0) > 250 { return field.Invalid( - field.NewPath("Spec", "MaxPods"), - m.Spec.MaxPods, + fldPath, + maxPods, "MaxPods must be between 10 and 250") } } @@ -357,11 +378,11 @@ func (m *AzureManagedMachinePool) validateMaxPods() error { return nil } -func (m *AzureManagedMachinePool) validateOSType() error { - if m.Spec.Mode == string(NodePoolModeSystem) { - if m.Spec.OSType != nil && *m.Spec.OSType != LinuxOS { +func validateOSType(mode string, osType *string, fldPath *field.Path) error { + if mode == string(NodePoolModeSystem) { + if osType != nil && *osType != LinuxOS { return field.Forbidden( - field.NewPath("Spec", "OSType"), + fldPath, "System node pooll must have OSType 'Linux'") } } @@ -369,48 +390,48 @@ func (m *AzureManagedMachinePool) validateOSType() error { return nil } -func (m *AzureManagedMachinePool) validateName() error { +func validateMPName(mpName string, specName *string, osType *string, fldPath *field.Path) error { var name *string var fieldNameMessage string - if m.Spec.Name == nil || *m.Spec.Name == "" { - name = &m.Name + if specName == nil || *specName == "" { + name = &mpName fieldNameMessage = "when spec.name is empty, metadata.name" } else { - name = m.Spec.Name + name = specName fieldNameMessage = "spec.name" } - if err := validateNameLength(m.Spec.OSType, name, fieldNameMessage); err != nil { + if err := validateNameLength(osType, name, fieldNameMessage, fldPath); err != nil { return err } - return validateNamePattern(name, fieldNameMessage) + return validateNamePattern(name, fieldNameMessage, fldPath) } -func validateNameLength(osType *string, name *string, fieldNameMessage string) error { +func validateNameLength(osType *string, name *string, fieldNameMessage string, fldPath *field.Path) error { if osType != nil && *osType == WindowsOS && name != nil && len(*name) > 6 { return field.Invalid( - field.NewPath("Spec", "Name"), + fldPath, name, fmt.Sprintf("For OSType Windows, %s can not be longer than 6 characters.", fieldNameMessage)) } else if (osType == nil || *osType == LinuxOS) && (name != nil && len(*name) > 12) { return field.Invalid( - field.NewPath("Spec", "Name"), + fldPath, osType, fmt.Sprintf("For OSType Linux, %s can not be longer than 12 characters.", fieldNameMessage)) } return nil } -func validateNamePattern(name *string, fieldNameMessage string) error { +func validateNamePattern(name *string, fieldNameMessage string, fldPath *field.Path) error { if name == nil || *name == "" { return nil } if 
!unicode.IsLower(rune((*name)[0])) { return field.Invalid( - field.NewPath("Spec", "Name"), + fldPath, name, fmt.Sprintf("%s must begin with a lowercase letter.", fieldNameMessage)) } @@ -418,7 +439,7 @@ func validateNamePattern(name *string, fieldNameMessage string) error { for _, char := range *name { if !(unicode.IsLower(char) || unicode.IsNumber(char)) { return field.Invalid( - field.NewPath("Spec", "Name"), + fldPath, name, fmt.Sprintf("%s may only contain lowercase alphanumeric characters.", fieldNameMessage)) } @@ -426,11 +447,11 @@ func validateNamePattern(name *string, fieldNameMessage string) error { return nil } -func (m *AzureManagedMachinePool) validateNodeLabels() error { - for key := range m.Spec.NodeLabels { +func validateNodeLabels(nodeLabels map[string]string, fldPath *field.Path) error { + for key := range nodeLabels { if azureutil.IsAzureSystemNodeLabelKey(key) { return field.Invalid( - field.NewPath("Spec", "NodeLabels"), + fldPath, key, fmt.Sprintf("Node pool label key must not start with %s", azureutil.AzureSystemNodeLabelPrefix)) } @@ -439,33 +460,33 @@ func (m *AzureManagedMachinePool) validateNodeLabels() error { return nil } -func (m *AzureManagedMachinePool) validateNodePublicIPPrefixID() error { - if m.Spec.NodePublicIPPrefixID != nil && !validNodePublicPrefixID.MatchString(*m.Spec.NodePublicIPPrefixID) { +func validateNodePublicIPPrefixID(nodePublicIPPrefixID *string, fldPath *field.Path) error { + if nodePublicIPPrefixID != nil && !validNodePublicPrefixID.MatchString(*nodePublicIPPrefixID) { return field.Invalid( - field.NewPath("Spec", "NodePublicIPPrefixID"), - m.Spec.NodePublicIPPrefixID, + fldPath, + nodePublicIPPrefixID, fmt.Sprintf("resource ID must match %q", validNodePublicPrefixID.String())) } return nil } -func (m *AzureManagedMachinePool) validateEnableNodePublicIP() error { - if (m.Spec.EnableNodePublicIP == nil || !*m.Spec.EnableNodePublicIP) && - m.Spec.NodePublicIPPrefixID != nil { +func validateEnableNodePublicIP(enableNodePublicIP *bool, nodePublicIPPrefixID *string, fldPath *field.Path) error { + if (enableNodePublicIP == nil || !*enableNodePublicIP) && + nodePublicIPPrefixID != nil { return field.Invalid( - field.NewPath("Spec", "EnableNodePublicIP"), - m.Spec.EnableNodePublicIP, + fldPath, + enableNodePublicIP, "must be set to true when NodePublicIPPrefixID is set") } return nil } -func (m *AzureManagedMachinePool) validateSubnetName() error { - if m.Spec.SubnetName != nil { +func validateMachinePoolSubnetName(subnetName *string, fldPath *field.Path) error { + if subnetName != nil { subnetRegex := "^[a-zA-Z0-9][a-zA-Z0-9-]{0,78}[a-zA-Z0-9]$" regex := regexp.MustCompile(subnetRegex) - if success := regex.MatchString(ptr.Deref(m.Spec.SubnetName, "")); !success { - return field.Invalid(field.NewPath("Spec", "SubnetName"), m.Spec.SubnetName, + if success := regex.MatchString(ptr.Deref(subnetName, "")); !success { + return field.Invalid(fldPath, subnetName, fmt.Sprintf("name of subnet doesn't match regex %s", subnetRegex)) } } @@ -474,7 +495,7 @@ func (m *AzureManagedMachinePool) validateSubnetName() error { // validateKubeletConfig enforces the AKS API configuration for KubeletConfig. // See: https://learn.microsoft.com/en-us/azure/aks/custom-node-configuration. 
-func (m *AzureManagedMachinePool) validateKubeletConfig() error { +func validateKubeletConfig(kubeletConfig *KubeletConfig, fldPath *field.Path) error { var allowedUnsafeSysctlsPatterns = []string{ `^kernel\.shm.+$`, `^kernel\.msg.+$`, @@ -482,25 +503,25 @@ func (m *AzureManagedMachinePool) validateKubeletConfig() error { `^fs\.mqueue\..+$`, `^net\..+$`, } - if m.Spec.KubeletConfig != nil { - if m.Spec.KubeletConfig.CPUCfsQuotaPeriod != nil { - if !strings.HasSuffix(ptr.Deref(m.Spec.KubeletConfig.CPUCfsQuotaPeriod, ""), "ms") { + if kubeletConfig != nil { + if kubeletConfig.CPUCfsQuotaPeriod != nil { + if !strings.HasSuffix(ptr.Deref(kubeletConfig.CPUCfsQuotaPeriod, ""), "ms") { return field.Invalid( - field.NewPath("Spec", "KubeletConfig", "CPUCfsQuotaPeriod"), - m.Spec.KubeletConfig.CPUCfsQuotaPeriod, + fldPath.Child("CPUfsQuotaPeriod"), + kubeletConfig.CPUCfsQuotaPeriod, "must be a string value in milliseconds with a 'ms' suffix, e.g., '100ms'") } } - if m.Spec.KubeletConfig.ImageGcHighThreshold != nil && m.Spec.KubeletConfig.ImageGcLowThreshold != nil { - if ptr.Deref[int32](m.Spec.KubeletConfig.ImageGcLowThreshold, 0) > ptr.Deref[int32](m.Spec.KubeletConfig.ImageGcHighThreshold, 0) { + if kubeletConfig.ImageGcHighThreshold != nil && kubeletConfig.ImageGcLowThreshold != nil { + if ptr.Deref[int32](kubeletConfig.ImageGcLowThreshold, 0) > ptr.Deref[int32](kubeletConfig.ImageGcHighThreshold, 0) { return field.Invalid( - field.NewPath("Spec", "KubeletConfig", "ImageGcLowThreshold"), - m.Spec.KubeletConfig.ImageGcLowThreshold, + fldPath.Child("ImageGcLowThreshold"), + kubeletConfig.ImageGcLowThreshold, fmt.Sprintf("must not be greater than ImageGcHighThreshold, ImageGcLowThreshold=%d, ImageGcHighThreshold=%d", - ptr.Deref[int32](m.Spec.KubeletConfig.ImageGcLowThreshold, 0), ptr.Deref[int32](m.Spec.KubeletConfig.ImageGcHighThreshold, 0))) + ptr.Deref[int32](kubeletConfig.ImageGcLowThreshold, 0), ptr.Deref[int32](kubeletConfig.ImageGcHighThreshold, 0))) } } - for _, val := range m.Spec.KubeletConfig.AllowedUnsafeSysctls { + for _, val := range kubeletConfig.AllowedUnsafeSysctls { var hasMatch bool for _, p := range allowedUnsafeSysctlsPatterns { if m, _ := regexp.MatchString(p, val); m { @@ -510,8 +531,8 @@ func (m *AzureManagedMachinePool) validateKubeletConfig() error { } if !hasMatch { return field.Invalid( - field.NewPath("Spec", "KubeletConfig", "AllowedUnsafeSysctls"), - m.Spec.KubeletConfig.AllowedUnsafeSysctls, + fldPath.Child("AllowedUnsafeSysctls"), + kubeletConfig.AllowedUnsafeSysctls, fmt.Sprintf("%s is not a supported AllowedUnsafeSysctls configuration", val)) } } @@ -521,25 +542,25 @@ func (m *AzureManagedMachinePool) validateKubeletConfig() error { // validateLinuxOSConfig enforces AKS API configuration for Linux OS custom configuration // See: https://learn.microsoft.com/en-us/azure/aks/custom-node-configuration#linux-os-custom-configuration for detailed information. 
-func (m *AzureManagedMachinePool) validateLinuxOSConfig() error { +func validateLinuxOSConfig(linuxOSConfig *LinuxOSConfig, kubeletConfig *KubeletConfig, fldPath *field.Path) error { var errs []error - if m.Spec.LinuxOSConfig == nil { + if linuxOSConfig == nil { return nil } - if m.Spec.LinuxOSConfig.SwapFileSizeMB != nil { - if m.Spec.KubeletConfig == nil || ptr.Deref(m.Spec.KubeletConfig.FailSwapOn, true) { + if linuxOSConfig.SwapFileSizeMB != nil { + if kubeletConfig == nil || ptr.Deref(kubeletConfig.FailSwapOn, true) { errs = append(errs, field.Invalid( - field.NewPath("Spec", "LinuxOSConfig", "SwapFileSizeMB"), - m.Spec.LinuxOSConfig.SwapFileSizeMB, + fldPath.Child("SwapFileSizeMB"), + linuxOSConfig.SwapFileSizeMB, "KubeletConfig.FailSwapOn must be set to false to enable swap file on nodes")) } } - if m.Spec.LinuxOSConfig.Sysctls != nil && m.Spec.LinuxOSConfig.Sysctls.NetIpv4IPLocalPortRange != nil { + if linuxOSConfig.Sysctls != nil && linuxOSConfig.Sysctls.NetIpv4IPLocalPortRange != nil { // match numbers separated by a space portRangeRegex := `^[0-9]+ [0-9]+$` - portRange := *m.Spec.LinuxOSConfig.Sysctls.NetIpv4IPLocalPortRange + portRange := *linuxOSConfig.Sysctls.NetIpv4IPLocalPortRange match, matchErr := regexp.MatchString(portRangeRegex, portRange) if matchErr != nil { @@ -547,8 +568,8 @@ func (m *AzureManagedMachinePool) validateLinuxOSConfig() error { } if !match { errs = append(errs, field.Invalid( - field.NewPath("Spec", "LinuxOSConfig", "Sysctls", "NetIpv4IpLocalPortRange"), - m.Spec.LinuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, + fldPath.Child("NetIpv4IpLocalPortRange"), + linuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, "LinuxOSConfig.Sysctls.NetIpv4IpLocalPortRange must be of the format \" \"")) } else { ports := strings.Split(portRange, " ") @@ -557,22 +578,22 @@ func (m *AzureManagedMachinePool) validateLinuxOSConfig() error { if firstPort < 1024 || firstPort > 60999 { errs = append(errs, field.Invalid( - field.NewPath("Spec", "LinuxOSConfig", "Sysctls", "NetIpv4IpLocalPortRange", "First"), - m.Spec.LinuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, + fldPath.Child("NetIpv4IpLocalPortRange", "First"), + linuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, fmt.Sprintf("first port of NetIpv4IpLocalPortRange=%d must be in between [1024 - 60999]", firstPort))) } if lastPort < 32768 || lastPort > 65000 { errs = append(errs, field.Invalid( - field.NewPath("Spec", "LinuxOSConfig", "Sysctls", "NetIpv4IpLocalPortRange", "Last"), - m.Spec.LinuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, + fldPath.Child("NetIpv4IpLocalPortRange", "Last"), + linuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, fmt.Sprintf("last port of NetIpv4IpLocalPortRange=%d must be in between [32768 -65000]", lastPort))) } if firstPort > lastPort { errs = append(errs, field.Invalid( - field.NewPath("Spec", "LinuxOSConfig", "Sysctls", "NetIpv4IpLocalPortRange", "First"), - m.Spec.LinuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, + fldPath.Child("NetIpv4IpLocalPortRange", "First"), + linuxOSConfig.Sysctls.NetIpv4IPLocalPortRange, fmt.Sprintf("first port of NetIpv4IpLocalPortRange=%d cannot be greater than last port of NetIpv4IpLocalPortRange=%d", firstPort, lastPort))) } } diff --git a/api/v1beta1/azuremanagedmachinepool_webhook_test.go b/api/v1beta1/azuremanagedmachinepool_webhook_test.go index aa76eca8bd0..f250baa2404 100644 --- a/api/v1beta1/azuremanagedmachinepool_webhook_test.go +++ b/api/v1beta1/azuremanagedmachinepool_webhook_test.go @@ -1292,7 +1292,7 @@ func TestAzureManagedMachinePool_validateLastSystemNodePool(t 
*testing.T) { _ = AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.cluster, tc.ammp).Build() - err := tc.ammp.validateLastSystemNodePool(fakeClient) + err := validateLastSystemNodePool(fakeClient, tc.ammp.Spec.NodeLabels, tc.ammp.Namespace) if tc.wantErr { g.Expect(err).To(HaveOccurred()) } else { diff --git a/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go b/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go new file mode 100644 index 00000000000..979432d9e7e --- /dev/null +++ b/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go @@ -0,0 +1,304 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + "reflect" + + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api-provider-azure/feature" + "sigs.k8s.io/cluster-api-provider-azure/util/maps" + webhookutils "sigs.k8s.io/cluster-api-provider-azure/util/webhook" + capifeature "sigs.k8s.io/cluster-api/feature" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// AzureManagedMachinePoolTemplateImmutableMsg is the message used for errors on fields that are immutable. +const AzureManagedMachinePoolTemplateImmutableMsg = "AzureManagedMachinePoolTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/change-clusterclass.html" + +// SetupAzureManagedMachinePoolTemplateWithManager will set up the webhook to be managed by the specified manager. +func SetupAzureManagedMachinePoolTemplateWithManager(mgr ctrl.Manager) error { + mpw := &AzureManagedMachinePoolTemplateWebhook{Client: mgr.GetClient()} + return ctrl.NewWebhookManagedBy(mgr). + For(&AzureManagedMachinePoolTemplate{}). + WithDefaulter(mpw). + WithValidator(mpw). 
+ Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepooltemplates,versions=v1beta1,name=validation.azuremanagedmachinepooltemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepooltemplates,versions=v1beta1,name=default.azuremanagedmachinepooltemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + +type AzureManagedMachinePoolTemplateWebhook struct { + Client client.Client +} + +// Default implements webhook.Defaulter so a webhook will be registered for the type. +func (mpw *AzureManagedMachinePoolTemplateWebhook) Default(ctx context.Context, obj runtime.Object) error { + mp, ok := obj.(*AzureManagedMachinePoolTemplate) + if !ok { + return apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") + } + if mp.Labels == nil { + mp.Labels = make(map[string]string) + } + mp.Labels[LabelAgentPoolMode] = mp.Spec.Template.Spec.Mode + + if mp.Spec.Template.Spec.Name == nil || *mp.Spec.Template.Spec.Name == "" { + mp.Spec.Template.Spec.Name = &mp.Name + } + + if mp.Spec.Template.Spec.OSType == nil { + mp.Spec.Template.Spec.OSType = ptr.To(DefaultOSType) + } + + return nil +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type. +func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + mp, ok := obj.(*AzureManagedMachinePoolTemplate) + if !ok { + return nil, apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") + } + + if !feature.Gates.Enabled(capifeature.MachinePool) { + return nil, field.Forbidden( + field.NewPath("spec"), + "can be set only if the Cluster API 'MachinePool' feature flag is enabled", + ) + } + + var errs []error + + errs = append(errs, validateMaxPods( + mp.Spec.Template.Spec.MaxPods, + field.NewPath("Spec", "MaxPods"))) + + errs = append(errs, validateOSType( + mp.Spec.Template.Spec.Mode, + mp.Spec.Template.Spec.OSType, + field.NewPath("Spec", "OSType"))) + + errs = append(errs, validateAgentPoolName( + mp.Spec.Template.Spec.OSType, + mp.Spec.Template.Spec.Name, + field.NewPath("Spec", "Name"))) + + errs = append(errs, validateNodeLabels( + mp.Spec.Template.Spec.NodeLabels, + field.NewPath("Spec", "NodeLabels"))) + + errs = append(errs, validateNodePublicIPPrefixID( + mp.Spec.Template.Spec.NodePublicIPPrefixID, + field.NewPath("Spec", "NodePublicIPPrefixID"))) + + errs = append(errs, validateEnableNodePublicIP( + mp.Spec.Template.Spec.EnableNodePublicIP, + mp.Spec.Template.Spec.NodePublicIPPrefixID, + field.NewPath("Spec", "EnableNodePublicIP"))) + + errs = append(errs, validateKubeletConfig( + mp.Spec.Template.Spec.KubeletConfig, + field.NewPath("Spec", "KubeletConfig"))) + + errs = append(errs, validateLinuxOSConfig( + mp.Spec.Template.Spec.LinuxOSConfig, + mp.Spec.Template.Spec.KubeletConfig, + field.NewPath("Spec", "LinuxOSConfig"))) + + return nil, kerrors.NewAggregate(errs) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
+func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + var allErrs field.ErrorList + old, ok := oldObj.(*AzureManagedMachinePoolTemplate) + if !ok { + return nil, apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") + } + mp, ok := newObj.(*AzureManagedMachinePoolTemplate) + if !ok { + return nil, apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "Name"), + old.Spec.Template.Spec.Name, + mp.Spec.Template.Spec.Name); err != nil { + allErrs = append(allErrs, err) + } + + if err := validateNodeLabels(mp.Spec.Template.Spec.NodeLabels, field.NewPath("Spec", "Template", "Spec", "NodeLabels")); err != nil { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "Template", "Spec", "NodeLabels"), + mp.Spec.Template.Spec.NodeLabels, + err.Error())) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "OSType"), + old.Spec.Template.Spec.OSType, + mp.Spec.Template.Spec.OSType); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "SKU"), + old.Spec.Template.Spec.SKU, + mp.Spec.Template.Spec.SKU); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "OSDiskSizeGB"), + old.Spec.Template.Spec.OSDiskSizeGB, + mp.Spec.Template.Spec.OSDiskSizeGB); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "SubnetName"), + old.Spec.Template.Spec.SubnetName, + mp.Spec.Template.Spec.SubnetName); err != nil && old.Spec.Template.Spec.SubnetName != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "EnableFIPS"), + old.Spec.Template.Spec.EnableFIPS, + mp.Spec.Template.Spec.EnableFIPS); err != nil && old.Spec.Template.Spec.EnableFIPS != nil { + allErrs = append(allErrs, err) + } + + // custom headers are immutable + oldCustomHeaders := maps.FilterByKeyPrefix(old.ObjectMeta.Annotations, CustomHeaderPrefix) + newCustomHeaders := maps.FilterByKeyPrefix(mp.ObjectMeta.Annotations, CustomHeaderPrefix) + if !reflect.DeepEqual(oldCustomHeaders, newCustomHeaders) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("metadata", "annotations"), + mp.ObjectMeta.Annotations, + fmt.Sprintf("annotations with '%s' prefix are immutable", CustomHeaderPrefix))) + } + + if !webhookutils.EnsureStringSlicesAreEquivalent(mp.Spec.Template.Spec.AvailabilityZones, old.Spec.Template.Spec.AvailabilityZones) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "Template", "Spec", "AvailabilityZones"), + mp.Spec.Template.Spec.AvailabilityZones, + "field is immutable")) + } + + if mp.Spec.Template.Spec.Mode != string(NodePoolModeSystem) && old.Spec.Template.Spec.Mode == string(NodePoolModeSystem) { + // validate for last system node pool + if err := validateLastSystemNodePool(mpw.Client, mp.Spec.Template.Spec.NodeLabels, mp.Namespace); err != nil { + allErrs = append(allErrs, field.Forbidden( + field.NewPath("Spec", "Template", "Spec", "Mode"), + "Cannot change node pool mode to User, you must have at least one System node pool in your cluster")) + } + } + + if err := webhookutils.ValidateImmutable( + 
field.NewPath("Spec", "Template", "Spec", "MaxPods"), + old.Spec.Template.Spec.MaxPods, + mp.Spec.Template.Spec.MaxPods); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "OsDiskType"), + old.Spec.Template.Spec.OsDiskType, + mp.Spec.Template.Spec.OsDiskType); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "ScaleSetPriority"), + old.Spec.Template.Spec.ScaleSetPriority, + mp.Spec.Template.Spec.ScaleSetPriority); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "EnableUltraSSD"), + old.Spec.Template.Spec.EnableUltraSSD, + mp.Spec.Template.Spec.EnableUltraSSD); err != nil { + allErrs = append(allErrs, err) + } + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "EnableNodePublicIP"), + old.Spec.Template.Spec.EnableNodePublicIP, + mp.Spec.Template.Spec.EnableNodePublicIP); err != nil { + allErrs = append(allErrs, err) + } + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "NodePublicIPPrefixID"), + old.Spec.Template.Spec.NodePublicIPPrefixID, + mp.Spec.Template.Spec.NodePublicIPPrefixID); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "KubeletConfig"), + old.Spec.Template.Spec.KubeletConfig, + mp.Spec.Template.Spec.KubeletConfig); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "KubeletDiskType"), + old.Spec.Template.Spec.KubeletDiskType, + mp.Spec.Template.Spec.KubeletDiskType); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Template", "Spec", "LinuxOSConfig"), + old.Spec.Template.Spec.LinuxOSConfig, + mp.Spec.Template.Spec.LinuxOSConfig); err != nil { + allErrs = append(allErrs, err) + } + + if len(allErrs) == 0 { + return nil, nil + } + return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedMachinePoolTemplate").GroupKind(), mp.Name, allErrs) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
+func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + mp, ok := obj.(*AzureManagedMachinePoolTemplate) + if !ok { + return nil, apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") + } + if mp.Spec.Template.Spec.Mode != string(NodePoolModeSystem) { + return nil, nil + } + + return nil, errors.Wrapf(validateLastSystemNodePool(mpw.Client, mp.Spec.Template.Spec.NodeLabels, mp.Namespace), "if the delete is triggered via owner MachinePool please refer to trouble shooting section in https://capz.sigs.k8s.io/topics/managedcluster.html") +} diff --git a/templates/flavors/aks-clusterclass-cluster/cluster.yaml b/templates/cluster-template-aks-topology.yaml similarity index 100% rename from templates/flavors/aks-clusterclass-cluster/cluster.yaml rename to templates/cluster-template-aks-topology.yaml diff --git a/templates/flavors/aks-topology/cluster.yaml b/templates/flavors/aks-topology/cluster.yaml new file mode 100644 index 00000000000..c78efb4b389 --- /dev/null +++ b/templates/flavors/aks-topology/cluster.yaml @@ -0,0 +1,21 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + topology: + class: ${CLUSTER_CLASS_NAME} + version: ${KUBERNETES_VERSION} + workers: + machinePools: + - class: default-system + name: mp-0 + replicas: 1 + - class: default-worker + name: mp-1 + replicas: 1 diff --git a/templates/flavors/aks-clusterclass-cluster/kustomization.yaml b/templates/flavors/aks-topology/kustomization.yaml similarity index 100% rename from templates/flavors/aks-clusterclass-cluster/kustomization.yaml rename to templates/flavors/aks-topology/kustomization.yaml diff --git a/templates/test/ci/prow-aks-clusterclass/kustomization.yaml b/templates/test/ci/prow-aks-clusterclass/kustomization.yaml index df8f28bf9cb..f72a0910979 100644 --- a/templates/test/ci/prow-aks-clusterclass/kustomization.yaml +++ b/templates/test/ci/prow-aks-clusterclass/kustomization.yaml @@ -3,7 +3,7 @@ kind: Kustomization namespace: default resources: - ../../../flavors/aks-clusterclass - - ../../../flavors/aks-clusterclass-cluster + - ../../../flavors/aks-topology patchesStrategicMerge: - patches/tags-aks-clusterclass.yaml - patches/aks-clusterclass-pool0.yaml From c52e29f88e40861215c5ac56a587554818b56d23 Mon Sep 17 00:00:00 2001 From: willie-yao Date: Wed, 18 Oct 2023 20:43:23 +0000 Subject: [PATCH 3/5] Add update validation to amcpt --- ...emanagedcontrolplanetemplate_validation.go | 53 ----- ...zuremanagedcontrolplanetemplate_webhook.go | 195 +++++++++++++++++- 2 files changed, 190 insertions(+), 58 deletions(-) delete mode 100644 api/v1beta1/azuremanagedcontrolplanetemplate_validation.go diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_validation.go b/api/v1beta1/azuremanagedcontrolplanetemplate_validation.go deleted file mode 100644 index 23e681e6fc7..00000000000 --- a/api/v1beta1/azuremanagedcontrolplanetemplate_validation.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Validate the Azure Managed Control Plane Template and return an aggregate error. -func (mcp *AzureManagedControlPlaneTemplate) validateManagedControlPlaneTemplate(cli client.Client) error { - var allErrs field.ErrorList - - allErrs = append(allErrs, validateDNSServiceIP( - mcp.Spec.Template.Spec.DNSServiceIP, - field.NewPath("spec").Child("template").Child("spec").Child("DNSServiceIP"))...) - - allErrs = append(allErrs, validateVersion( - mcp.Spec.Template.Spec.Version, - field.NewPath("spec").Child("template").Child("spec").Child("Version"))...) - - allErrs = append(allErrs, validateLoadBalancerProfile( - mcp.Spec.Template.Spec.LoadBalancerProfile, - field.NewPath("spec").Child("template").Child("spec").Child("LoadBalancerProfile"))...) - - allErrs = append(allErrs, validateManagedClusterNetwork( - cli, - mcp.Labels, - mcp.Namespace, - mcp.Spec.Template.Spec.DNSServiceIP, - mcp.Spec.Template.Spec.VirtualNetwork.Subnet, - field.NewPath("spec").Child("template").Child("spec"))...) - - allErrs = append(allErrs, validateName(mcp.Name, field.NewPath("Name"))...) - - allErrs = append(allErrs, validateAutoScalerProfile(mcp.Spec.Template.Spec.AutoScalerProfile, field.NewPath("spec").Child("template").Child("spec").Child("AutoScalerProfile"))...) - - return allErrs.ToAggregate() -} diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go index 4d8182ac223..2a5873f1ab9 100644 --- a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/cluster-api-provider-azure/feature" + webhookutils "sigs.k8s.io/cluster-api-provider-azure/util/webhook" capifeature "sigs.k8s.io/cluster-api/feature" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -89,19 +90,203 @@ func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateUpdate(ctx context. 
if !ok { return nil, apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate") } - if !reflect.DeepEqual(mcp.Spec.Template.Spec, old.Spec.Template.Spec) { - allErrs = append(allErrs, - field.Invalid(field.NewPath("AzureManagedControlPlaneTemplate", "spec", "template", "spec"), mcp, AzureManagedControlPlaneTemplateImmutableMsg), - ) + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "SubscriptionID"), + old.Spec.Template.Spec.SubscriptionID, + mcp.Spec.Template.Spec.SubscriptionID); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "Location"), + old.Spec.Template.Spec.Location, + mcp.Spec.Template.Spec.Location); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "DNSServiceIP"), + old.Spec.Template.Spec.DNSServiceIP, + mcp.Spec.Template.Spec.DNSServiceIP); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "NetworkPlugin"), + old.Spec.Template.Spec.NetworkPlugin, + mcp.Spec.Template.Spec.NetworkPlugin); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "NetworkPolicy"), + old.Spec.Template.Spec.NetworkPolicy, + mcp.Spec.Template.Spec.NetworkPolicy); err != nil { + allErrs = append(allErrs, err) + } + + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "LoadBalancerSKU"), + old.Spec.Template.Spec.LoadBalancerSKU, + mcp.Spec.Template.Spec.LoadBalancerSKU); err != nil { + allErrs = append(allErrs, err) + } + + if old.Spec.Template.Spec.AADProfile != nil { + if mcp.Spec.Template.Spec.AADProfile == nil { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "AADProfile"), + mcp.Spec.Template.Spec.AADProfile, + "field cannot be nil, cannot disable AADProfile")) + } else { + if !mcp.Spec.Template.Spec.AADProfile.Managed && old.Spec.Template.Spec.AADProfile.Managed { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "AADProfile.Managed"), + mcp.Spec.Template.Spec.AADProfile.Managed, + "cannot set AADProfile.Managed to false")) + } + if len(mcp.Spec.Template.Spec.AADProfile.AdminGroupObjectIDs) == 0 { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "AADProfile.AdminGroupObjectIDs"), + mcp.Spec.Template.Spec.AADProfile.AdminGroupObjectIDs, + "length of AADProfile.AdminGroupObjectIDs cannot be zero")) + } + } + } + + // Consider removing this once moves out of preview + // Updating outboundType after cluster creation (PREVIEW) + // https://learn.microsoft.com/en-us/azure/aks/egress-outboundtype#updating-outboundtype-after-cluster-creation-preview + if err := webhookutils.ValidateImmutable( + field.NewPath("Spec", "OutboundType"), + old.Spec.Template.Spec.OutboundType, + mcp.Spec.Template.Spec.OutboundType); err != nil { + allErrs = append(allErrs, err) + } + + if errs := mcp.validateVirtualNetworkTemplateUpdate(old); len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + + if errs := mcp.validateAPIServerAccessProfileTemplateUpdate(old); len(errs) > 0 { + allErrs = append(allErrs, errs...) } if len(allErrs) == 0 { - return nil, nil + return nil, mcp.validateManagedControlPlaneTemplate(mcpw.Client) } + return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedControlPlaneTemplate").GroupKind(), mcp.Name, allErrs) } +// Validate the Azure Managed Control Plane Template and return an aggregate error. 
+func (mcp *AzureManagedControlPlaneTemplate) validateManagedControlPlaneTemplate(cli client.Client) error { + var allErrs field.ErrorList + + allErrs = append(allErrs, validateDNSServiceIP( + mcp.Spec.Template.Spec.DNSServiceIP, + field.NewPath("spec").Child("template").Child("spec").Child("DNSServiceIP"))...) + + allErrs = append(allErrs, validateVersion( + mcp.Spec.Template.Spec.Version, + field.NewPath("spec").Child("template").Child("spec").Child("Version"))...) + + allErrs = append(allErrs, validateLoadBalancerProfile( + mcp.Spec.Template.Spec.LoadBalancerProfile, + field.NewPath("spec").Child("template").Child("spec").Child("LoadBalancerProfile"))...) + + allErrs = append(allErrs, validateManagedClusterNetwork( + cli, + mcp.Labels, + mcp.Namespace, + mcp.Spec.Template.Spec.DNSServiceIP, + mcp.Spec.Template.Spec.VirtualNetwork.Subnet, + field.NewPath("spec").Child("template").Child("spec"))...) + + allErrs = append(allErrs, validateName(mcp.Name, field.NewPath("Name"))...) + + allErrs = append(allErrs, validateAutoScalerProfile(mcp.Spec.Template.Spec.AutoScalerProfile, field.NewPath("spec").Child("template").Child("spec").Child("AutoScalerProfile"))...) + + return allErrs.ToAggregate() +} + // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateDelete(ctx context.Context, _ runtime.Object) (admission.Warnings, error) { return nil, nil } + +// validateVirtualNetworkTemplateUpdate validates update to VirtualNetworkTemplate. +func (m *AzureManagedControlPlaneTemplate) validateVirtualNetworkTemplateUpdate(old *AzureManagedControlPlaneTemplate) field.ErrorList { + var allErrs field.ErrorList + if old.Spec.Template.Spec.VirtualNetwork.Name != m.Spec.Template.Spec.VirtualNetwork.Name { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "Template", "Spec", "VirtualNetwork.Name"), + m.Spec.Template.Spec.VirtualNetwork.Name, + "Virtual Network Name is immutable")) + } + + if old.Spec.Template.Spec.VirtualNetwork.CIDRBlock != m.Spec.Template.Spec.VirtualNetwork.CIDRBlock { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "Template", "Spec", "VirtualNetwork.CIDRBlock"), + m.Spec.Template.Spec.VirtualNetwork.CIDRBlock, + "Virtual Network CIDRBlock is immutable")) + } + + if old.Spec.Template.Spec.VirtualNetwork.Subnet.Name != m.Spec.Template.Spec.VirtualNetwork.Subnet.Name { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "Template", "Spec", "VirtualNetwork.Subnet.Name"), + m.Spec.Template.Spec.VirtualNetwork.Subnet.Name, + "Subnet Name is immutable")) + } + + // NOTE: This only works because we force the user to set the CIDRBlock for both the + // managed and unmanaged Vnets. If we ever update the subnet cidr based on what's + // actually set in the subnet, and it is different from what's in the Spec, for + // unmanaged Vnets like we do with the AzureCluster this logic will break. + if old.Spec.Template.Spec.VirtualNetwork.Subnet.CIDRBlock != m.Spec.Template.Spec.VirtualNetwork.Subnet.CIDRBlock { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "Template", "Spec", "VirtualNetwork.Subnet.CIDRBlock"), + m.Spec.Template.Spec.VirtualNetwork.Subnet.CIDRBlock, + "Subnet CIDRBlock is immutable")) + } + + return allErrs +} + +// validateAPIServerAccessProfileTemplateUpdate validates update to APIServerAccessProfileTemplate. 
+func (m *AzureManagedControlPlaneTemplate) validateAPIServerAccessProfileTemplateUpdate(old *AzureManagedControlPlaneTemplate) field.ErrorList { + var allErrs field.ErrorList + + newAPIServerAccessProfileNormalized := &APIServerAccessProfile{} + oldAPIServerAccessProfileNormalized := &APIServerAccessProfile{} + if m.Spec.Template.Spec.APIServerAccessProfile != nil { + newAPIServerAccessProfileNormalized = &APIServerAccessProfile{ + EnablePrivateCluster: m.Spec.Template.Spec.APIServerAccessProfile.EnablePrivateCluster, + PrivateDNSZone: m.Spec.Template.Spec.APIServerAccessProfile.PrivateDNSZone, + EnablePrivateClusterPublicFQDN: m.Spec.Template.Spec.APIServerAccessProfile.EnablePrivateClusterPublicFQDN, + } + } + if old.Spec.Template.Spec.APIServerAccessProfile != nil { + oldAPIServerAccessProfileNormalized = &APIServerAccessProfile{ + EnablePrivateCluster: old.Spec.Template.Spec.APIServerAccessProfile.EnablePrivateCluster, + PrivateDNSZone: old.Spec.Template.Spec.APIServerAccessProfile.PrivateDNSZone, + EnablePrivateClusterPublicFQDN: old.Spec.Template.Spec.APIServerAccessProfile.EnablePrivateClusterPublicFQDN, + } + } + + if !reflect.DeepEqual(newAPIServerAccessProfileNormalized, oldAPIServerAccessProfileNormalized) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("Spec", "Template", "Spec", "APIServerAccessProfile"), + m.Spec.Template.Spec.APIServerAccessProfile, "fields (except for AuthorizedIPRanges) are immutable"), + ) + } + + return allErrs +} From e3bc4cda1f546fa6ca27546ab7361c175a217222 Mon Sep 17 00:00:00 2001 From: willie-yao Date: Wed, 18 Oct 2023 20:47:40 +0000 Subject: [PATCH 4/5] Cleanup --- .../azuremanagedclustertemplate_webhook.go | 26 ++++++----- ...zuremanagedcontrolplanetemplate_webhook.go | 29 ++++++------ ...azuremanagedmachinepooltemplate_webhook.go | 40 ++++++++--------- config/webhook/manifests.yaml | 45 +++++++++++++++++++ main.go | 7 ++- 5 files changed, 99 insertions(+), 48 deletions(-) diff --git a/api/v1beta1/azuremanagedclustertemplate_webhook.go b/api/v1beta1/azuremanagedclustertemplate_webhook.go index 51b4b8055a3..3cb304e83cf 100644 --- a/api/v1beta1/azuremanagedclustertemplate_webhook.go +++ b/api/v1beta1/azuremanagedclustertemplate_webhook.go @@ -17,21 +17,20 @@ limitations under the License. package v1beta1 import ( + "fmt" "reflect" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/cluster-api-provider-azure/feature" + "sigs.k8s.io/cluster-api-provider-azure/util/maps" capifeature "sigs.k8s.io/cluster-api/feature" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) -// AzureManagedClusterTemplateImmutableMsg is the message used for errors on fields that are immutable. -const AzureManagedClusterTemplateImmutableMsg = "AzureManagedClusterTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/change-clusterclass.html" - // SetupWebhookWithManager sets up and registers the webhook with the manager. func (r *AzureManagedClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). @@ -58,18 +57,25 @@ func (r *AzureManagedClusterTemplate) ValidateCreate() (admission.Warnings, erro // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
 func (r *AzureManagedClusterTemplate) ValidateUpdate(oldRaw runtime.Object) (admission.Warnings, error) {
-	var allErrs field.ErrorList
 	old := oldRaw.(*AzureManagedClusterTemplate)
-	if !reflect.DeepEqual(r.Spec.Template.Spec, old.Spec.Template.Spec) {
+	var allErrs field.ErrorList
+
+	// custom headers are immutable
+	oldCustomHeaders := maps.FilterByKeyPrefix(old.ObjectMeta.Annotations, CustomHeaderPrefix)
+	newCustomHeaders := maps.FilterByKeyPrefix(r.ObjectMeta.Annotations, CustomHeaderPrefix)
+	if !reflect.DeepEqual(oldCustomHeaders, newCustomHeaders) {
 		allErrs = append(allErrs,
-			field.Invalid(field.NewPath("AzureManagedClusterTemplate", "spec", "template", "spec"), r, AzureManagedClusterTemplateImmutableMsg),
-		)
+			field.Invalid(
+				field.NewPath("metadata", "annotations"),
+				r.ObjectMeta.Annotations,
+				fmt.Sprintf("annotations with '%s' prefix are immutable", CustomHeaderPrefix)))
 	}
 
-	if len(allErrs) == 0 {
-		return nil, nil
+	if len(allErrs) != 0 {
+		return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedClusterTemplate").GroupKind(), r.Name, allErrs)
 	}
-	return nil, apierrors.NewInvalid(GroupVersion.WithKind("AzureManagedClusterTemplate").GroupKind(), r.Name, allErrs)
+
+	return nil, nil
 }
 
 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go
index 2a5873f1ab9..4f9d6ba6ad0 100644
--- a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go
+++ b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook.go
@@ -31,11 +31,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 )
 
-// AzureManagedControlPlaneTemplateImmutableMsg is the message used for errors on fields that are immutable.
-const AzureManagedControlPlaneTemplateImmutableMsg = "AzureManagedControlPlaneTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/change-clusterclass.html"
-
-// SetupAzureManagedControlPlaneTemplateWithManager will set up the webhook to be managed by the specified manager.
-func SetupAzureManagedControlPlaneTemplateWithManager(mgr ctrl.Manager) error {
+// SetupAzureManagedControlPlaneTemplateWebhookWithManager will set up the webhook to be managed by the specified manager.
+func SetupAzureManagedControlPlaneTemplateWebhookWithManager(mgr ctrl.Manager) error {
 	mcpw := &azureManagedControlPlaneTemplateWebhook{Client: mgr.GetClient()}
 	return ctrl.NewWebhookManagedBy(mgr).
 		For(&AzureManagedControlPlaneTemplate{}).
@@ -67,7 +64,7 @@ func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateCreate(ctx context.
 	if !ok {
 		return nil, apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate")
 	}
-	// NOTE: AzureManagedControlPlane relies upon MachinePools, which is behind a feature gate flag.
+	// NOTE: AzureManagedControlPlaneTemplate relies upon MachinePools, which is behind a feature gate flag.
 	// The webhook must prevent creating new objects in case the feature flag is disabled.
 	if !feature.Gates.Enabled(capifeature.MachinePool) {
 		return nil, field.Forbidden(
@@ -91,42 +88,42 @@ func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateUpdate(ctx context.
return nil, apierrors.NewBadRequest("expected an AzureManagedControlPlaneTemplate") } if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "SubscriptionID"), + field.NewPath("Spec", "Template", "Spec", "SubscriptionID"), old.Spec.Template.Spec.SubscriptionID, mcp.Spec.Template.Spec.SubscriptionID); err != nil { allErrs = append(allErrs, err) } if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "Location"), + field.NewPath("Spec", "Template", "Spec", "Location"), old.Spec.Template.Spec.Location, mcp.Spec.Template.Spec.Location); err != nil { allErrs = append(allErrs, err) } if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "DNSServiceIP"), + field.NewPath("Spec", "Template", "Spec", "DNSServiceIP"), old.Spec.Template.Spec.DNSServiceIP, mcp.Spec.Template.Spec.DNSServiceIP); err != nil { allErrs = append(allErrs, err) } if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "NetworkPlugin"), + field.NewPath("Spec", "Template", "Spec", "NetworkPlugin"), old.Spec.Template.Spec.NetworkPlugin, mcp.Spec.Template.Spec.NetworkPlugin); err != nil { allErrs = append(allErrs, err) } if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "NetworkPolicy"), + field.NewPath("Spec", "Template", "Spec", "NetworkPolicy"), old.Spec.Template.Spec.NetworkPolicy, mcp.Spec.Template.Spec.NetworkPolicy); err != nil { allErrs = append(allErrs, err) } if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "LoadBalancerSKU"), + field.NewPath("Spec", "Template", "Spec", "LoadBalancerSKU"), old.Spec.Template.Spec.LoadBalancerSKU, mcp.Spec.Template.Spec.LoadBalancerSKU); err != nil { allErrs = append(allErrs, err) @@ -136,21 +133,21 @@ func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateUpdate(ctx context. if mcp.Spec.Template.Spec.AADProfile == nil { allErrs = append(allErrs, field.Invalid( - field.NewPath("Spec", "AADProfile"), + field.NewPath("Spec", "Template", "Spec", "AADProfile"), mcp.Spec.Template.Spec.AADProfile, "field cannot be nil, cannot disable AADProfile")) } else { if !mcp.Spec.Template.Spec.AADProfile.Managed && old.Spec.Template.Spec.AADProfile.Managed { allErrs = append(allErrs, field.Invalid( - field.NewPath("Spec", "AADProfile.Managed"), + field.NewPath("Spec", "Template", "Spec", "AADProfile.Managed"), mcp.Spec.Template.Spec.AADProfile.Managed, "cannot set AADProfile.Managed to false")) } if len(mcp.Spec.Template.Spec.AADProfile.AdminGroupObjectIDs) == 0 { allErrs = append(allErrs, field.Invalid( - field.NewPath("Spec", "AADProfile.AdminGroupObjectIDs"), + field.NewPath("Spec", "Template", "Spec", "AADProfile.AdminGroupObjectIDs"), mcp.Spec.Template.Spec.AADProfile.AdminGroupObjectIDs, "length of AADProfile.AdminGroupObjectIDs cannot be zero")) } @@ -161,7 +158,7 @@ func (mcpw *azureManagedControlPlaneTemplateWebhook) ValidateUpdate(ctx context. 
// Updating outboundType after cluster creation (PREVIEW) // https://learn.microsoft.com/en-us/azure/aks/egress-outboundtype#updating-outboundtype-after-cluster-creation-preview if err := webhookutils.ValidateImmutable( - field.NewPath("Spec", "OutboundType"), + field.NewPath("Spec", "Template", "Spec", "OutboundType"), old.Spec.Template.Spec.OutboundType, mcp.Spec.Template.Spec.OutboundType); err != nil { allErrs = append(allErrs, err) diff --git a/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go b/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go index 979432d9e7e..ee4aaccb974 100644 --- a/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go +++ b/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go @@ -36,12 +36,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) -// AzureManagedMachinePoolTemplateImmutableMsg is the message used for errors on fields that are immutable. -const AzureManagedMachinePoolTemplateImmutableMsg = "AzureManagedMachinePoolTemplate spec.template.spec field is immutable. Please create new resource instead. ref doc: https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/change-clusterclass.html" - -// SetupAzureManagedMachinePoolTemplateWithManager will set up the webhook to be managed by the specified manager. -func SetupAzureManagedMachinePoolTemplateWithManager(mgr ctrl.Manager) error { - mpw := &AzureManagedMachinePoolTemplateWebhook{Client: mgr.GetClient()} +// SetupAzureManagedMachinePoolTemplateWebhookWithManager will set up the webhook to be managed by the specified manager. +func SetupAzureManagedMachinePoolTemplateWebhookWithManager(mgr ctrl.Manager) error { + mpw := &azureManagedMachinePoolTemplateWebhook{Client: mgr.GetClient()} return ctrl.NewWebhookManagedBy(mgr). For(&AzureManagedMachinePoolTemplate{}). WithDefaulter(mpw). @@ -49,15 +46,14 @@ func SetupAzureManagedMachinePoolTemplateWithManager(mgr ctrl.Manager) error { Complete() } -// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepooltemplates,versions=v1beta1,name=validation.azuremanagedmachinepooltemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepooltemplates,versions=v1beta1,name=default.azuremanagedmachinepooltemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +//+kubebuilder:webhook:path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepooltemplates,verbs=create;update,versions=v1beta1,name=default.azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -type AzureManagedMachinePoolTemplateWebhook struct { +type azureManagedMachinePoolTemplateWebhook struct { Client client.Client } // Default implements webhook.Defaulter so a webhook will be registered for the type. 
-func (mpw *AzureManagedMachinePoolTemplateWebhook) Default(ctx context.Context, obj runtime.Object) error { +func (mpw *azureManagedMachinePoolTemplateWebhook) Default(ctx context.Context, obj runtime.Object) error { mp, ok := obj.(*AzureManagedMachinePoolTemplate) if !ok { return apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") @@ -78,8 +74,10 @@ func (mpw *AzureManagedMachinePoolTemplateWebhook) Default(ctx context.Context, return nil } +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedmachinepooltemplates,versions=v1beta1,name=validation.azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 + // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { +func (mpw *azureManagedMachinePoolTemplateWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { mp, ok := obj.(*AzureManagedMachinePoolTemplate) if !ok { return nil, apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") @@ -96,45 +94,45 @@ func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateCreate(ctx context.Co errs = append(errs, validateMaxPods( mp.Spec.Template.Spec.MaxPods, - field.NewPath("Spec", "MaxPods"))) + field.NewPath("Spec", "Template", "Spec", "MaxPods"))) errs = append(errs, validateOSType( mp.Spec.Template.Spec.Mode, mp.Spec.Template.Spec.OSType, - field.NewPath("Spec", "OSType"))) + field.NewPath("Spec", "Template", "Spec", "OSType"))) errs = append(errs, validateAgentPoolName( mp.Spec.Template.Spec.OSType, mp.Spec.Template.Spec.Name, - field.NewPath("Spec", "Name"))) + field.NewPath("Spec", "Template", "Spec", "Name"))) errs = append(errs, validateNodeLabels( mp.Spec.Template.Spec.NodeLabels, - field.NewPath("Spec", "NodeLabels"))) + field.NewPath("Spec", "Template", "Spec", "NodeLabels"))) errs = append(errs, validateNodePublicIPPrefixID( mp.Spec.Template.Spec.NodePublicIPPrefixID, - field.NewPath("Spec", "NodePublicIPPrefixID"))) + field.NewPath("Spec", "Template", "Spec", "NodePublicIPPrefixID"))) errs = append(errs, validateEnableNodePublicIP( mp.Spec.Template.Spec.EnableNodePublicIP, mp.Spec.Template.Spec.NodePublicIPPrefixID, - field.NewPath("Spec", "EnableNodePublicIP"))) + field.NewPath("Spec", "Template", "Spec", "EnableNodePublicIP"))) errs = append(errs, validateKubeletConfig( mp.Spec.Template.Spec.KubeletConfig, - field.NewPath("Spec", "KubeletConfig"))) + field.NewPath("Spec", "Template", "Spec", "KubeletConfig"))) errs = append(errs, validateLinuxOSConfig( mp.Spec.Template.Spec.LinuxOSConfig, mp.Spec.Template.Spec.KubeletConfig, - field.NewPath("Spec", "LinuxOSConfig"))) + field.NewPath("Spec", "Template", "Spec", "LinuxOSConfig"))) return nil, kerrors.NewAggregate(errs) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
-func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { +func (mpw *azureManagedMachinePoolTemplateWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { var allErrs field.ErrorList old, ok := oldObj.(*AzureManagedMachinePoolTemplate) if !ok { @@ -291,7 +289,7 @@ func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateUpdate(ctx context.Co } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (mpw *AzureManagedMachinePoolTemplateWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { +func (mpw *azureManagedMachinePoolTemplateWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { mp, ok := obj.(*AzureManagedMachinePoolTemplate) if !ok { return nil, apierrors.NewBadRequest("expected an AzureManagedMachinePoolTemplate") diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index dcd4f04c897..28e2d98746b 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -158,6 +158,28 @@ webhooks: resources: - azuremanagedmachinepools sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate + failurePolicy: Fail + matchPolicy: Equivalent + name: default.azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - azuremanagedmachinepooltemplates + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -403,6 +425,29 @@ webhooks: resources: - azuremanagedmachinepools sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - azuremanagedmachinepooltemplates + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 diff --git a/main.go b/main.go index 46fbd6f67a5..882575a9e54 100644 --- a/main.go +++ b/main.go @@ -539,12 +539,17 @@ func registerWebhooks(mgr manager.Manager) { os.Exit(1) } + if err := infrav1.SetupAzureManagedMachinePoolTemplateWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedMachinePoolTemplate") + os.Exit(1) + } + if err := infrav1.SetupAzureManagedControlPlaneWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlane") os.Exit(1) } - if err := infrav1.SetupAzureManagedControlPlaneTemplateWithManager(mgr); err != nil { + if err := infrav1.SetupAzureManagedControlPlaneTemplateWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlaneTemplate") os.Exit(1) } From 7fb9fc5ec834c2352206cc6633c22e9b4a91ca81 Mon Sep 17 00:00:00 2001 From: willie-yao Date: Wed, 18 Oct 2023 21:33:36 +0000 Subject: [PATCH 5/5] More cleanup --- 
.../azuremanagedcontrolplane_default.go | 1 - .../azuremanagedcontrolplane_webhook.go | 1 + ...azuremanagedmachinepooltemplate_webhook.go | 3 ++- controllers/helpers.go | 1 - ...ter-template-aks-clusterclass-cluster.yaml | 21 ------------------- ...er.yaml => cluster-template-topology.yaml} | 0 .../cluster.yaml | 0 .../kustomization.yaml | 0 .../test/ci/prow-topology/kustomization.yaml | 2 +- 9 files changed, 4 insertions(+), 25 deletions(-) delete mode 100644 templates/cluster-template-aks-clusterclass-cluster.yaml rename templates/{cluster-template-clusterclass-cluster.yaml => cluster-template-topology.yaml} (100%) rename templates/flavors/{clusterclass-cluster => topology}/cluster.yaml (100%) rename templates/flavors/{clusterclass-cluster => topology}/kustomization.yaml (100%) diff --git a/api/v1beta1/azuremanagedcontrolplane_default.go b/api/v1beta1/azuremanagedcontrolplane_default.go index 26720fbe2ac..3c227b1bebc 100644 --- a/api/v1beta1/azuremanagedcontrolplane_default.go +++ b/api/v1beta1/azuremanagedcontrolplane_default.go @@ -56,7 +56,6 @@ func (m *AzureManagedControlPlane) setDefaultResourceGroupName() { if m.Spec.ResourceGroupName == "" { if clusterName, ok := m.Labels[clusterv1.ClusterNameLabel]; ok { m.Spec.ResourceGroupName = clusterName - fmt.Printf("WILLIE ResourceGroupName is empty, defaulting to %s\n", m.Spec.ResourceGroupName) } } } diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook.go b/api/v1beta1/azuremanagedcontrolplane_webhook.go index 2da8ad01bac..feaa15b36eb 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook.go @@ -26,6 +26,7 @@ import ( "strings" "time" + "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" diff --git a/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go b/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go index ee4aaccb974..722b70b78c0 100644 --- a/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go +++ b/api/v1beta1/azuremanagedmachinepooltemplate_webhook.go @@ -101,7 +101,8 @@ func (mpw *azureManagedMachinePoolTemplateWebhook) ValidateCreate(ctx context.Co mp.Spec.Template.Spec.OSType, field.NewPath("Spec", "Template", "Spec", "OSType"))) - errs = append(errs, validateAgentPoolName( + errs = append(errs, validateMPName( + mp.Name, mp.Spec.Template.Spec.OSType, mp.Spec.Template.Spec.Name, field.NewPath("Spec", "Template", "Spec", "Name"))) diff --git a/controllers/helpers.go b/controllers/helpers.go index f7d1ce3c23f..971d426c952 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -945,7 +945,6 @@ func MachinePoolToAzureManagedControlPlaneMapFunc(ctx context.Context, c client. 
gk := gvk.GroupKind() ref := cluster.Spec.ControlPlaneRef - fmt.Printf("WILLIE cluster spec: %v", cluster.Spec) if ref == nil || ref.Name == "" { log.Error(err, "control plane ref is nil or empty") return nil diff --git a/templates/cluster-template-aks-clusterclass-cluster.yaml b/templates/cluster-template-aks-clusterclass-cluster.yaml deleted file mode 100644 index c78efb4b389..00000000000 --- a/templates/cluster-template-aks-clusterclass-cluster.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - name: ${CLUSTER_NAME} - namespace: default -spec: - clusterNetwork: - pods: - cidrBlocks: - - 192.168.0.0/16 - topology: - class: ${CLUSTER_CLASS_NAME} - version: ${KUBERNETES_VERSION} - workers: - machinePools: - - class: default-system - name: mp-0 - replicas: 1 - - class: default-worker - name: mp-1 - replicas: 1 diff --git a/templates/cluster-template-clusterclass-cluster.yaml b/templates/cluster-template-topology.yaml similarity index 100% rename from templates/cluster-template-clusterclass-cluster.yaml rename to templates/cluster-template-topology.yaml diff --git a/templates/flavors/clusterclass-cluster/cluster.yaml b/templates/flavors/topology/cluster.yaml similarity index 100% rename from templates/flavors/clusterclass-cluster/cluster.yaml rename to templates/flavors/topology/cluster.yaml diff --git a/templates/flavors/clusterclass-cluster/kustomization.yaml b/templates/flavors/topology/kustomization.yaml similarity index 100% rename from templates/flavors/clusterclass-cluster/kustomization.yaml rename to templates/flavors/topology/kustomization.yaml diff --git a/templates/test/ci/prow-topology/kustomization.yaml b/templates/test/ci/prow-topology/kustomization.yaml index 77f435eada9..e4da9c2e30e 100644 --- a/templates/test/ci/prow-topology/kustomization.yaml +++ b/templates/test/ci/prow-topology/kustomization.yaml @@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: default resources: - - ../../../flavors/clusterclass-cluster/cluster.yaml + - ../../../flavors/topology/cluster.yaml - cni-resource-set.yaml - ../../../addons/windows/csi-proxy/csi-proxy-resource-set.yaml - ../../../addons/cluster-api-helm/calico.yaml
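
A quick illustration of the behavior introduced by the webhook changes above: the AzureManagedControlPlaneTemplate update path now rejects changes to individual immutable fields (SubscriptionID, Location, DNSServiceIP, and so on) instead of rejecting any change to spec.template.spec. Below is a minimal sketch of a unit test that exercises this, assuming it sits next to the webhook in the api/v1beta1 package, that Location is a plain string field, and that DeepCopy is generated for the new template types; the test name and scenario are illustrative and not part of this patch series.

    package v1beta1

    import (
    	"context"
    	"testing"

    	. "github.com/onsi/gomega"
    )

    func TestAzureManagedControlPlaneTemplateLocationIsImmutable(t *testing.T) {
    	g := NewWithT(t)

    	// Old template with a location set.
    	oldTemplate := &AzureManagedControlPlaneTemplate{}
    	oldTemplate.Spec.Template.Spec.Location = "eastus"

    	// New template attempts to move the control plane to another region.
    	newTemplate := oldTemplate.DeepCopy()
    	newTemplate.Spec.Template.Spec.Location = "westus"

    	// The client is only used by the cross-resource network validation,
    	// which is not reached once an immutable field change is detected.
    	mcpw := &azureManagedControlPlaneTemplateWebhook{}

    	_, err := mcpw.ValidateUpdate(context.Background(), oldTemplate, newTemplate)
    	g.Expect(err).To(HaveOccurred())
    }

Only Location is varied here; every other field is left at its zero value, so the remaining per-field checks pass and the aggregate error comes solely from the Location comparison.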