diff --git a/api/v1beta1/azuremanagedcontrolplane_types.go b/api/v1beta1/azuremanagedcontrolplane_types.go
index 8cf4e13776b..94a9e1d2921 100644
--- a/api/v1beta1/azuremanagedcontrolplane_types.go
+++ b/api/v1beta1/azuremanagedcontrolplane_types.go
@@ -118,7 +118,7 @@ type AzureManagedControlPlaneSpec struct {
// NetworkPlugin used for building Kubernetes network.
// Allowed values are "azure", "kubenet".
// Immutable.
- // +kubebuilder:validation:Enum=azure;kubenet
+ // +kubebuilder:validation:Enum=azure;kubenet;none
// +optional
NetworkPlugin *string `json:"networkPlugin,omitempty"`
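The enum change above allows `networkPlugin: "none"`, which leaves CNI installation to the user (bring-your-own CNI on AKS). A minimal sketch of opting in through the Go API, assuming only the types and the `ptr` helper already used elsewhere in this diff (the function name is hypothetical):

```go
// Hypothetical illustration, not part of this diff: requesting the newly
// allowed "none" network plugin so the cluster can bring its own CNI.
package example

import (
	"k8s.io/utils/ptr"

	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
)

func byoCNIControlPlane() *infrav1.AzureManagedControlPlane {
	return &infrav1.AzureManagedControlPlane{
		Spec: infrav1.AzureManagedControlPlaneSpec{
			// "none" tells AKS to skip installing a network plugin.
			NetworkPlugin: ptr.To("none"),
		},
	}
}
```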
diff --git a/azure/interfaces.go b/azure/interfaces.go
index 8f8f41ffbe4..7a6d611eb02 100644
--- a/azure/interfaces.go
+++ b/azure/interfaces.go
@@ -21,8 +21,10 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
// Reconciler is a generic interface for a controller reconciler which has Reconcile and Delete methods.
@@ -79,6 +81,7 @@ type NetworkDescriber interface {
type ClusterDescriber interface {
Authorizer
ResourceGroup() string
+ NodeResourceGroup() string
ClusterName() string
Location() string
ExtendedLocation() *infrav1.ExtendedLocationSpec
@@ -104,6 +107,9 @@ type AsyncStatusUpdater interface {
type ClusterScoper interface {
ClusterDescriber
NetworkDescriber
+ AsyncStatusUpdater
+ GetClient() client.Client
+ GetDeletionTimestamp() *metav1.Time
}
// ManagedClusterScoper defines the interface for ManagedClusterScope.
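Folding AsyncStatusUpdater, GetClient, and GetDeletionTimestamp into ClusterScoper means helpers can be written against the interface and accept either an AzureCluster or an AzureManagedControlPlane scope. A minimal sketch under that assumption (the helper name is hypothetical):

```go
// Hypothetical helper, not part of this diff: it depends only on the widened
// azure.ClusterScoper interface, so both ClusterScope and
// ManagedControlPlaneScope satisfy it.
package example

import (
	"sigs.k8s.io/cluster-api-provider-azure/azure"
)

// clusterIsDeleting reports whether the scope's owning Cluster has been
// marked for deletion, using only interface methods.
func clusterIsDeleting(scope azure.ClusterScoper) bool {
	return !scope.GetDeletionTimestamp().IsZero()
}
```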
diff --git a/azure/mock_azure/azure_mock.go b/azure/mock_azure/azure_mock.go
index 1bf5d35e5b0..3ad477ea9f7 100644
--- a/azure/mock_azure/azure_mock.go
+++ b/azure/mock_azure/azure_mock.go
@@ -31,8 +31,10 @@ import (
azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore"
genruntime "github.com/Azure/azure-service-operator/v2/pkg/genruntime"
gomock "go.uber.org/mock/gomock"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
+ client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockReconciler is a mock of Reconciler interface.
@@ -787,6 +789,20 @@ func (mr *MockClusterDescriberMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockClusterDescriber)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockClusterDescriber) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockClusterDescriberMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockClusterDescriber)(nil).NodeResourceGroup))
+}
+
// ResourceGroup mocks base method.
func (m *MockClusterDescriber) ResourceGroup() string {
m.ctrl.T.Helper()
@@ -1145,6 +1161,18 @@ func (mr *MockClusterScoperMockRecorder) ControlPlaneSubnet() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlPlaneSubnet", reflect.TypeOf((*MockClusterScoper)(nil).ControlPlaneSubnet))
}
+// DeleteLongRunningOperationState mocks base method.
+func (m *MockClusterScoper) DeleteLongRunningOperationState(arg0, arg1, arg2 string) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "DeleteLongRunningOperationState", arg0, arg1, arg2)
+}
+
+// DeleteLongRunningOperationState indicates an expected call of DeleteLongRunningOperationState.
+func (mr *MockClusterScoperMockRecorder) DeleteLongRunningOperationState(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockClusterScoper)(nil).DeleteLongRunningOperationState), arg0, arg1, arg2)
+}
+
// ExtendedLocation mocks base method.
func (m *MockClusterScoper) ExtendedLocation() *v1beta1.ExtendedLocationSpec {
m.ctrl.T.Helper()
@@ -1201,6 +1229,48 @@ func (mr *MockClusterScoperMockRecorder) FailureDomains() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FailureDomains", reflect.TypeOf((*MockClusterScoper)(nil).FailureDomains))
}
+// GetClient mocks base method.
+func (m *MockClusterScoper) GetClient() client.Client {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetClient")
+ ret0, _ := ret[0].(client.Client)
+ return ret0
+}
+
+// GetClient indicates an expected call of GetClient.
+func (mr *MockClusterScoperMockRecorder) GetClient() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockClusterScoper)(nil).GetClient))
+}
+
+// GetDeletionTimestamp mocks base method.
+func (m *MockClusterScoper) GetDeletionTimestamp() *v1.Time {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetDeletionTimestamp")
+ ret0, _ := ret[0].(*v1.Time)
+ return ret0
+}
+
+// GetDeletionTimestamp indicates an expected call of GetDeletionTimestamp.
+func (mr *MockClusterScoperMockRecorder) GetDeletionTimestamp() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeletionTimestamp", reflect.TypeOf((*MockClusterScoper)(nil).GetDeletionTimestamp))
+}
+
+// GetLongRunningOperationState mocks base method.
+func (m *MockClusterScoper) GetLongRunningOperationState(arg0, arg1, arg2 string) *v1beta1.Future {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetLongRunningOperationState", arg0, arg1, arg2)
+ ret0, _ := ret[0].(*v1beta1.Future)
+ return ret0
+}
+
+// GetLongRunningOperationState indicates an expected call of GetLongRunningOperationState.
+func (mr *MockClusterScoperMockRecorder) GetLongRunningOperationState(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLongRunningOperationState", reflect.TypeOf((*MockClusterScoper)(nil).GetLongRunningOperationState), arg0, arg1, arg2)
+}
+
// GetPrivateDNSZoneName mocks base method.
func (m *MockClusterScoper) GetPrivateDNSZoneName() string {
m.ctrl.T.Helper()
@@ -1285,6 +1355,20 @@ func (mr *MockClusterScoperMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockClusterScoper)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockClusterScoper) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockClusterScoperMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockClusterScoper)(nil).NodeResourceGroup))
+}
+
// NodeSubnets mocks base method.
func (m *MockClusterScoper) NodeSubnets() []v1beta1.SubnetSpec {
m.ctrl.T.Helper()
@@ -1341,6 +1425,18 @@ func (mr *MockClusterScoperMockRecorder) ResourceGroup() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockClusterScoper)(nil).ResourceGroup))
}
+// SetLongRunningOperationState mocks base method.
+func (m *MockClusterScoper) SetLongRunningOperationState(arg0 *v1beta1.Future) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetLongRunningOperationState", arg0)
+}
+
+// SetLongRunningOperationState indicates an expected call of SetLongRunningOperationState.
+func (mr *MockClusterScoperMockRecorder) SetLongRunningOperationState(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLongRunningOperationState", reflect.TypeOf((*MockClusterScoper)(nil).SetLongRunningOperationState), arg0)
+}
+
// SetSubnet mocks base method.
func (m *MockClusterScoper) SetSubnet(arg0 v1beta1.SubnetSpec) {
m.ctrl.T.Helper()
@@ -1423,6 +1519,42 @@ func (mr *MockClusterScoperMockRecorder) Token() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Token", reflect.TypeOf((*MockClusterScoper)(nil).Token))
}
+// UpdateDeleteStatus mocks base method.
+func (m *MockClusterScoper) UpdateDeleteStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "UpdateDeleteStatus", arg0, arg1, arg2)
+}
+
+// UpdateDeleteStatus indicates an expected call of UpdateDeleteStatus.
+func (mr *MockClusterScoperMockRecorder) UpdateDeleteStatus(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeleteStatus", reflect.TypeOf((*MockClusterScoper)(nil).UpdateDeleteStatus), arg0, arg1, arg2)
+}
+
+// UpdatePatchStatus mocks base method.
+func (m *MockClusterScoper) UpdatePatchStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "UpdatePatchStatus", arg0, arg1, arg2)
+}
+
+// UpdatePatchStatus indicates an expected call of UpdatePatchStatus.
+func (mr *MockClusterScoperMockRecorder) UpdatePatchStatus(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePatchStatus", reflect.TypeOf((*MockClusterScoper)(nil).UpdatePatchStatus), arg0, arg1, arg2)
+}
+
+// UpdatePutStatus mocks base method.
+func (m *MockClusterScoper) UpdatePutStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "UpdatePutStatus", arg0, arg1, arg2)
+}
+
+// UpdatePutStatus indicates an expected call of UpdatePutStatus.
+func (mr *MockClusterScoperMockRecorder) UpdatePutStatus(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockClusterScoper)(nil).UpdatePutStatus), arg0, arg1, arg2)
+}
+
// Vnet mocks base method.
func (m *MockClusterScoper) Vnet() *v1beta1.VnetSpec {
m.ctrl.T.Helper()
diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go
index e6602d6fd2e..581626bf544 100644
--- a/azure/scope/cluster.go
+++ b/azure/scope/cluster.go
@@ -136,6 +136,11 @@ func (s *ClusterScope) GetClient() client.Client {
return s.Client
}
+// GetDeletionTimestamp returns the deletion timestamp of the Cluster.
+func (s *ClusterScope) GetDeletionTimestamp() *metav1.Time {
+ return s.Cluster.DeletionTimestamp
+}
+
// PublicIPSpecs returns the public IP specs.
func (s *ClusterScope) PublicIPSpecs() []azure.ResourceSpecGetter {
var publicIPSpecs []azure.ResourceSpecGetter
@@ -740,6 +745,12 @@ func (s *ClusterScope) ResourceGroup() string {
return s.AzureCluster.Spec.ResourceGroup
}
+// NodeResourceGroup returns the resource group where nodes live.
+// For AzureClusters this is the same as the cluster RG.
+func (s *ClusterScope) NodeResourceGroup() string {
+ return s.ResourceGroup()
+}
+
// ClusterName returns the cluster name.
func (s *ClusterScope) ClusterName() string {
return s.Cluster.Name
diff --git a/azure/scope/machine.go b/azure/scope/machine.go
index 14f3985c8ab..273ddf470e2 100644
--- a/azure/scope/machine.go
+++ b/azure/scope/machine.go
@@ -162,7 +162,7 @@ func (m *MachineScope) VMSpec() azure.ResourceSpecGetter {
Name: m.Name(),
Location: m.Location(),
ExtendedLocation: m.ExtendedLocation(),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
ClusterName: m.ClusterName(),
Role: m.Role(),
NICIDs: m.NICIDs(),
@@ -193,7 +193,7 @@ func (m *MachineScope) VMSpec() azure.ResourceSpecGetter {
func (m *MachineScope) TagsSpecs() []azure.TagsSpec {
return []azure.TagsSpec{
{
- Scope: azure.VMID(m.SubscriptionID(), m.ResourceGroup(), m.Name()),
+ Scope: azure.VMID(m.SubscriptionID(), m.NodeResourceGroup(), m.Name()),
Tags: m.AdditionalTags(),
Annotation: azure.VMTagsLastAppliedAnnotation,
},
@@ -206,7 +206,7 @@ func (m *MachineScope) PublicIPSpecs() []azure.ResourceSpecGetter {
if m.AzureMachine.Spec.AllocatePublicIP {
specs = append(specs, &publicips.PublicIPSpec{
Name: azure.GenerateNodePublicIPName(m.Name()),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
ClusterName: m.ClusterName(),
DNSName: "", // Set to default value
IsIPv6: false, // Set to default value
@@ -225,13 +225,13 @@ func (m *MachineScope) InboundNatSpecs() []azure.ResourceSpecGetter {
if m.Role() == infrav1.ControlPlane {
spec := &inboundnatrules.InboundNatSpec{
Name: m.Name(),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
LoadBalancerName: m.APIServerLBName(),
FrontendIPConfigurationID: nil,
}
if frontEndIPs := m.APIServerLB().FrontendIPs; len(frontEndIPs) > 0 {
ipConfig := frontEndIPs[0].Name
- id := azure.FrontendIPConfigID(m.SubscriptionID(), m.ResourceGroup(), m.APIServerLBName(), ipConfig)
+ id := azure.FrontendIPConfigID(m.SubscriptionID(), m.NodeResourceGroup(), m.APIServerLBName(), ipConfig)
spec.FrontendIPConfigurationID = ptr.To(id)
}
@@ -260,7 +260,7 @@ func (m *MachineScope) NICSpecs() []azure.ResourceSpecGetter {
func (m *MachineScope) BuildNICSpec(nicName string, infrav1NetworkInterface infrav1.NetworkInterface, primaryNetworkInterface bool) *networkinterfaces.NICSpec {
spec := &networkinterfaces.NICSpec{
Name: nicName,
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
Location: m.Location(),
ExtendedLocation: m.ExtendedLocation(),
SubscriptionID: m.SubscriptionID(),
@@ -328,13 +328,13 @@ func (m *MachineScope) DiskSpecs() []azure.ResourceSpecGetter {
diskSpecs := make([]azure.ResourceSpecGetter, 1+len(m.AzureMachine.Spec.DataDisks))
diskSpecs[0] = &disks.DiskSpec{
Name: azure.GenerateOSDiskName(m.Name()),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
}
for i, dd := range m.AzureMachine.Spec.DataDisks {
diskSpecs[i+1] = &disks.DiskSpec{
Name: azure.GenerateDataDiskName(m.Name(), dd.NameSuffix),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
}
}
return diskSpecs
@@ -348,7 +348,7 @@ func (m *MachineScope) RoleAssignmentSpecs(principalID *string) []azure.Resource
Name: m.SystemAssignedIdentityName(),
MachineName: m.Name(),
ResourceType: azure.VirtualMachine,
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
Scope: m.SystemAssignedIdentityScope(),
RoleDefinitionID: m.SystemAssignedIdentityDefinitionID(),
PrincipalID: principalID,
@@ -382,7 +382,7 @@ func (m *MachineScope) VMExtensionSpecs() []azure.ResourceSpecGetter {
Settings: extension.Settings,
ProtectedSettings: extension.ProtectedSettings,
},
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
Location: m.Location(),
})
}
@@ -393,7 +393,7 @@ func (m *MachineScope) VMExtensionSpecs() []azure.ResourceSpecGetter {
if bootstrapExtensionSpec != nil {
extensionSpecs = append(extensionSpecs, &vmextensions.VMExtensionSpec{
ExtensionSpec: *bootstrapExtensionSpec,
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
Location: m.Location(),
})
}
@@ -482,7 +482,7 @@ func (m *MachineScope) AvailabilitySetSpec() azure.ResourceSpecGetter {
spec := &availabilitysets.AvailabilitySetSpec{
Name: availabilitySetName,
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
ClusterName: m.ClusterName(),
Location: m.Location(),
SKU: nil,
@@ -524,7 +524,7 @@ func (m *MachineScope) AvailabilitySet() (string, bool) {
func (m *MachineScope) AvailabilitySetID() string {
var asID string
if asName, ok := m.AvailabilitySet(); ok {
- asID = azure.AvailabilitySetID(m.SubscriptionID(), m.ResourceGroup(), asName)
+ asID = azure.AvailabilitySetID(m.SubscriptionID(), m.NodeResourceGroup(), asName)
}
return asID
}
diff --git a/azure/scope/machinepool.go b/azure/scope/machinepool.go
index 0a2a0929f16..3e85e089523 100644
--- a/azure/scope/machinepool.go
+++ b/azure/scope/machinepool.go
@@ -179,7 +179,7 @@ func (m *MachinePoolScope) ScaleSetSpec(ctx context.Context) azure.ResourceSpecG
spec := &scalesets.ScaleSetSpec{
Name: m.Name(),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
Size: m.AzureMachinePool.Spec.Template.VMSize,
Capacity: int64(ptr.Deref[int32](m.MachinePool.Spec.Replicas, 0)),
SSHKeyData: m.AzureMachinePool.Spec.Template.SSHPublicKey,
@@ -741,7 +741,7 @@ func (m *MachinePoolScope) RoleAssignmentSpecs(principalID *string) []azure.Reso
roles[0] = &roleassignments.RoleAssignmentSpec{
Name: m.SystemAssignedIdentityName(),
MachineName: m.Name(),
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
ResourceType: azure.VirtualMachineScaleSet,
Scope: m.SystemAssignedIdentityScope(),
RoleDefinitionID: m.SystemAssignedIdentityDefinitionID(),
@@ -777,7 +777,7 @@ func (m *MachinePoolScope) VMSSExtensionSpecs() []azure.ResourceSpecGetter {
Settings: extension.Settings,
ProtectedSettings: extension.ProtectedSettings,
},
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
})
}
@@ -787,7 +787,7 @@ func (m *MachinePoolScope) VMSSExtensionSpecs() []azure.ResourceSpecGetter {
if bootstrapExtensionSpec != nil {
extensionSpecs = append(extensionSpecs, &scalesets.VMSSExtensionSpec{
ExtensionSpec: *bootstrapExtensionSpec,
- ResourceGroup: m.ResourceGroup(),
+ ResourceGroup: m.NodeResourceGroup(),
})
}
diff --git a/azure/scope/machinepoolmachine.go b/azure/scope/machinepoolmachine.go
index 918004e13cf..5183303b643 100644
--- a/azure/scope/machinepoolmachine.go
+++ b/azure/scope/machinepoolmachine.go
@@ -157,7 +157,7 @@ func (s *MachinePoolMachineScope) ScaleSetVMSpec() azure.ResourceSpecGetter {
spec := &scalesetvms.ScaleSetVMSpec{
Name: s.Name(),
InstanceID: s.InstanceID(),
- ResourceGroup: s.ResourceGroup(),
+ ResourceGroup: s.NodeResourceGroup(),
ScaleSetName: s.ScaleSetName(),
ProviderID: s.ProviderID(),
IsFlex: s.OrchestrationMode() == infrav1.FlexibleOrchestrationMode,
diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go
index c52a4c5c9e9..50f4c9cff4f 100644
--- a/azure/scope/managedcontrolplane.go
+++ b/azure/scope/managedcontrolplane.go
@@ -19,14 +19,19 @@ package scope
import (
"context"
"encoding/json"
+ "fmt"
"strings"
"time"
asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601"
"github.com/pkg/errors"
"golang.org/x/mod/semver"
+ "gopkg.in/yaml.v3"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ bootstrapapi "k8s.io/cluster-bootstrap/token/api"
"k8s.io/utils/ptr"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"
@@ -39,13 +44,19 @@ import (
"sigs.k8s.io/cluster-api-provider-azure/util/maps"
"sigs.k8s.io/cluster-api-provider-azure/util/tele"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/controllers/remote"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/secret"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
-const resourceHealthWarningInitialGracePeriod = 1 * time.Hour
+const (
+ resourceHealthWarningInitialGracePeriod = 1 * time.Hour
+ // managedControlPlaneScopeName is the sourceName, or more specifically the UserAgent, of the client used to store the cluster-info ConfigMap.
+ managedControlPlaneScopeName = "azuremanagedcontrolplane-scope"
+)
// ManagedControlPlaneScopeParams defines the input parameters used to create a new managed
// control plane.
@@ -139,6 +150,11 @@ func (s *ManagedControlPlaneScope) GetClient() client.Client {
return s.Client
}
+// GetDeletionTimestamp returns the deletion timestamp of the cluster.
+func (s *ManagedControlPlaneScope) GetDeletionTimestamp() *metav1.Time {
+ return s.Cluster.DeletionTimestamp
+}
+
// ResourceGroup returns the managed control plane's resource group.
func (s *ManagedControlPlaneScope) ResourceGroup() string {
if s.ControlPlane == nil {
@@ -418,7 +434,7 @@ func (s *ManagedControlPlaneScope) APIServerLBName() string {
}
// APIServerLBPoolName returns the API Server LB backend pool name.
-func (s *ManagedControlPlaneScope) APIServerLBPoolName(_ string) string {
+func (s *ManagedControlPlaneScope) APIServerLBPoolName() string {
return "" // does not apply for AKS
}
@@ -682,6 +698,64 @@ func (s *ManagedControlPlaneScope) SetUserKubeconfigData(kubeConfigData []byte)
s.userKubeConfigData = kubeConfigData
}
+// MakeClusterCA returns a cluster CA Secret for the managed control plane.
+func (s *ManagedControlPlaneScope) MakeClusterCA() *corev1.Secret {
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secret.Name(s.Cluster.Name, secret.ClusterCA),
+ Namespace: s.Cluster.Namespace,
+ OwnerReferences: []metav1.OwnerReference{
+ *metav1.NewControllerRef(s.ControlPlane, infrav1.GroupVersion.WithKind("AzureManagedControlPlane")),
+ },
+ },
+ }
+}
+
+// StoreClusterInfo stores the discovery cluster-info configmap in the kube-public namespace on the AKS cluster so kubeadm can access it to join nodes.
+func (s *ManagedControlPlaneScope) StoreClusterInfo(ctx context.Context, caData []byte) error {
+ remoteclient, err := remote.NewClusterClient(ctx, managedControlPlaneScopeName, s.Client, types.NamespacedName{
+ Namespace: s.Cluster.Namespace,
+ Name: s.Cluster.Name,
+ })
+ if err != nil {
+ return errors.Wrap(err, "failed to create remote cluster kubeclient")
+ }
+
+ discoveryFile := clientcmdapi.NewConfig()
+ discoveryFile.Clusters[""] = &clientcmdapi.Cluster{
+ CertificateAuthorityData: caData,
+ Server: fmt.Sprintf(
+ "%s:%d",
+ s.ControlPlane.Spec.ControlPlaneEndpoint.Host,
+ s.ControlPlane.Spec.ControlPlaneEndpoint.Port,
+ ),
+ }
+
+ data, err := yaml.Marshal(&discoveryFile)
+ if err != nil {
+ return errors.Wrap(err, "failed to serialize cluster-info to yaml")
+ }
+
+ clusterInfo := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: bootstrapapi.ConfigMapClusterInfo,
+ Namespace: metav1.NamespacePublic,
+ },
+ Data: map[string]string{
+ bootstrapapi.KubeConfigKey: string(data),
+ },
+ }
+
+ if _, err := controllerutil.CreateOrUpdate(ctx, remoteclient, clusterInfo, func() error {
+ clusterInfo.Data[bootstrapapi.KubeConfigKey] = string(data)
+ return nil
+ }); err != nil {
+ return errors.Wrapf(err, "failed to reconcile certificate authority data secret for cluster")
+ }
+
+ return nil
+}
+
// SetKubeletIdentity sets the ID of the user-assigned identity for kubelet if not already set.
func (s *ManagedControlPlaneScope) SetKubeletIdentity(id string) {
s.ControlPlane.Spec.KubeletUserAssignedIdentity = id
diff --git a/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go b/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go
index 88e7266e3f1..6a379dd1358 100644
--- a/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go
+++ b/azure/services/availabilitysets/mock_availabilitysets/availabilitysets_mock.go
@@ -293,6 +293,20 @@ func (mr *MockAvailabilitySetScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockAvailabilitySetScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockAvailabilitySetScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockAvailabilitySetScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockAvailabilitySetScope)(nil).NodeResourceGroup))
+}
+
// ResourceGroup mocks base method.
func (m *MockAvailabilitySetScope) ResourceGroup() string {
m.ctrl.T.Helper()
diff --git a/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go b/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go
index cd1c027da24..940a83d120e 100644
--- a/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go
+++ b/azure/services/bastionhosts/mocks_bastionhosts/bastionhosts_mock.go
@@ -29,9 +29,11 @@ import (
azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore"
gomock "go.uber.org/mock/gomock"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
azure "sigs.k8s.io/cluster-api-provider-azure/azure"
v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
+ client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockBastionScope is a mock of BastionScope interface.
@@ -321,6 +323,34 @@ func (mr *MockBastionScopeMockRecorder) FailureDomains() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FailureDomains", reflect.TypeOf((*MockBastionScope)(nil).FailureDomains))
}
+// GetClient mocks base method.
+func (m *MockBastionScope) GetClient() client.Client {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetClient")
+ ret0, _ := ret[0].(client.Client)
+ return ret0
+}
+
+// GetClient indicates an expected call of GetClient.
+func (mr *MockBastionScopeMockRecorder) GetClient() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockBastionScope)(nil).GetClient))
+}
+
+// GetDeletionTimestamp mocks base method.
+func (m *MockBastionScope) GetDeletionTimestamp() *v1.Time {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetDeletionTimestamp")
+ ret0, _ := ret[0].(*v1.Time)
+ return ret0
+}
+
+// GetDeletionTimestamp indicates an expected call of GetDeletionTimestamp.
+func (mr *MockBastionScopeMockRecorder) GetDeletionTimestamp() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeletionTimestamp", reflect.TypeOf((*MockBastionScope)(nil).GetDeletionTimestamp))
+}
+
// GetLongRunningOperationState mocks base method.
func (m *MockBastionScope) GetLongRunningOperationState(arg0, arg1, arg2 string) *v1beta1.Future {
m.ctrl.T.Helper()
@@ -419,6 +449,20 @@ func (mr *MockBastionScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockBastionScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockBastionScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockBastionScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockBastionScope)(nil).NodeResourceGroup))
+}
+
// NodeSubnets mocks base method.
func (m *MockBastionScope) NodeSubnets() []v1beta1.SubnetSpec {
m.ctrl.T.Helper()
diff --git a/azure/services/disks/mock_disks/disks_mock.go b/azure/services/disks/mock_disks/disks_mock.go
index aea267e91df..59c428747ed 100644
--- a/azure/services/disks/mock_disks/disks_mock.go
+++ b/azure/services/disks/mock_disks/disks_mock.go
@@ -293,6 +293,20 @@ func (mr *MockDiskScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockDiskScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockDiskScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockDiskScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockDiskScope)(nil).NodeResourceGroup))
+}
+
// ResourceGroup mocks base method.
func (m *MockDiskScope) ResourceGroup() string {
m.ctrl.T.Helper()
diff --git a/azure/services/groups/groups.go b/azure/services/groups/groups.go
index 8e62e049830..5ceafaef0e7 100644
--- a/azure/services/groups/groups.go
+++ b/azure/services/groups/groups.go
@@ -53,23 +53,15 @@ func New(scope GroupScope) *Service {
}
}
-// IsManaged returns true if the ASO ResourceGroup was created by CAPZ,
-// meaning that the resource group's lifecycle is managed.
-func (s *Service) IsManaged(ctx context.Context, spec azure.ASOResourceSpecGetter[*asoresourcesv1.ResourceGroup]) (bool, error) {
- return aso.IsManaged(ctx, s.Scope.GetClient(), spec, s.Scope.ClusterName())
-}
-
-// ShouldDeleteIndividualResources returns false if the resource group is
+// IsManaged returns true if all resource groups are
// managed and reconciled by ASO, meaning that we can rely on a single resource
// group delete operation as opposed to deleting every individual resource.
-func (s *Service) ShouldDeleteIndividualResources(ctx context.Context) bool {
+func (s *Service) IsManaged(ctx context.Context) (bool, error) {
// Unless all resource groups are managed by CAPZ and reconciled by ASO, resources need to be deleted individually.
for _, spec := range s.Specs {
- // Since this is a best effort attempt to speed up delete, we don't fail the delete if we can't get the RG status.
- // Instead, take the long way and delete all resources one by one.
- managed, err := s.IsManaged(ctx, spec)
+ managed, err := aso.IsManaged(ctx, s.Scope.GetClient(), spec, s.Scope.ClusterName())
if err != nil || !managed {
- return true
+ return managed, err
}
// For ASO, "managed" only tells us that we're allowed to delete the ASO
@@ -78,8 +70,8 @@ func (s *Service) ShouldDeleteIndividualResources(ctx context.Context) bool {
group := spec.ResourceRef()
err = s.Scope.GetClient().Get(ctx, client.ObjectKeyFromObject(group), group)
if err != nil || group.GetAnnotations()[asoannotations.ReconcilePolicy] != string(asoannotations.ReconcilePolicyManage) {
- return true
+ return false, err
}
}
- return false
+ return true, nil
}
diff --git a/azure/services/groups/groups_test.go b/azure/services/groups/groups_test.go
index 9c21a157ce2..f549e99be98 100644
--- a/azure/services/groups/groups_test.go
+++ b/azure/services/groups/groups_test.go
@@ -33,12 +33,13 @@ import (
fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
-func TestShouldDeleteIndividualResources(t *testing.T) {
+func TestIsManaged(t *testing.T) {
tests := []struct {
- name string
- objects []client.Object
- expect func(s *mock_groups.MockGroupScopeMockRecorder)
- expected bool
+ name string
+ objects []client.Object
+ expect func(s *mock_groups.MockGroupScopeMockRecorder)
+ expected bool
+ expectedError bool
}{
{
name: "error checking if group is managed",
@@ -47,7 +48,7 @@ func TestShouldDeleteIndividualResources(t *testing.T) {
s.GroupSpecs().Return([]azure.ASOResourceSpecGetter[*asoresourcesv1.ResourceGroup]{&GroupSpec{}}).AnyTimes()
s.ClusterName().Return("").AnyTimes()
},
- expected: true,
+ expectedError: true,
},
{
name: "group is unmanaged",
@@ -71,7 +72,7 @@ func TestShouldDeleteIndividualResources(t *testing.T) {
}).AnyTimes()
s.ClusterName().Return("cluster").AnyTimes()
},
- expected: true,
+ expected: false,
},
{
name: "group is managed and has reconcile policy skip",
@@ -98,7 +99,7 @@ func TestShouldDeleteIndividualResources(t *testing.T) {
}).AnyTimes()
s.ClusterName().Return("cluster").AnyTimes()
},
- expected: true,
+ expected: false,
},
{
name: "group is managed and has reconcile policy manage",
@@ -125,7 +126,7 @@ func TestShouldDeleteIndividualResources(t *testing.T) {
}).AnyTimes()
s.ClusterName().Return("cluster").AnyTimes()
},
- expected: false,
+ expected: true,
},
}
@@ -146,8 +147,12 @@ func TestShouldDeleteIndividualResources(t *testing.T) {
scopeMock.EXPECT().GetClient().Return(ctrlClient).AnyTimes()
test.expect(scopeMock.EXPECT())
- actual := New(scopeMock).ShouldDeleteIndividualResources(context.Background())
- g.Expect(actual).To(Equal(test.expected))
+ actual, err := New(scopeMock).IsManaged(context.Background())
+ if test.expectedError {
+ g.Expect(err).To(HaveOccurred())
+ } else {
+ g.Expect(actual).To(Equal(test.expected))
+ }
})
}
}
diff --git a/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go b/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go
index 6bdf8342926..c0fefc61f34 100644
--- a/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go
+++ b/azure/services/inboundnatrules/mock_inboundnatrules/inboundnatrules_mock.go
@@ -307,6 +307,20 @@ func (mr *MockInboundNatScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockInboundNatScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockInboundNatScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockInboundNatScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockInboundNatScope)(nil).NodeResourceGroup))
+}
+
// ResourceGroup mocks base method.
func (m *MockInboundNatScope) ResourceGroup() string {
m.ctrl.T.Helper()
diff --git a/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go b/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go
index 2f5324a7698..e9a816f1481 100644
--- a/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go
+++ b/azure/services/loadbalancers/mock_loadbalancers/loadbalancers_mock.go
@@ -29,9 +29,11 @@ import (
azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore"
gomock "go.uber.org/mock/gomock"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
azure "sigs.k8s.io/cluster-api-provider-azure/azure"
v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
+ client "sigs.k8s.io/controller-runtime/pkg/client"
)
// MockLBScope is a mock of LBScope interface.
@@ -307,6 +309,34 @@ func (mr *MockLBScopeMockRecorder) FailureDomains() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FailureDomains", reflect.TypeOf((*MockLBScope)(nil).FailureDomains))
}
+// GetClient mocks base method.
+func (m *MockLBScope) GetClient() client.Client {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetClient")
+ ret0, _ := ret[0].(client.Client)
+ return ret0
+}
+
+// GetClient indicates an expected call of GetClient.
+func (mr *MockLBScopeMockRecorder) GetClient() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockLBScope)(nil).GetClient))
+}
+
+// GetDeletionTimestamp mocks base method.
+func (m *MockLBScope) GetDeletionTimestamp() *v1.Time {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetDeletionTimestamp")
+ ret0, _ := ret[0].(*v1.Time)
+ return ret0
+}
+
+// GetDeletionTimestamp indicates an expected call of GetDeletionTimestamp.
+func (mr *MockLBScopeMockRecorder) GetDeletionTimestamp() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeletionTimestamp", reflect.TypeOf((*MockLBScope)(nil).GetDeletionTimestamp))
+}
+
// GetLongRunningOperationState mocks base method.
func (m *MockLBScope) GetLongRunningOperationState(arg0, arg1, arg2 string) *v1beta1.Future {
m.ctrl.T.Helper()
@@ -419,6 +449,20 @@ func (mr *MockLBScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockLBScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockLBScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockLBScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockLBScope)(nil).NodeResourceGroup))
+}
+
// NodeSubnets mocks base method.
func (m *MockLBScope) NodeSubnets() []v1beta1.SubnetSpec {
m.ctrl.T.Helper()
diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go
index c66c8989a4e..a0ce50de144 100644
--- a/azure/services/managedclusters/managedclusters.go
+++ b/azure/services/managedclusters/managedclusters.go
@@ -57,6 +57,8 @@ type ManagedClusterScope interface {
IsAADEnabled() bool
AreLocalAccountsDisabled() bool
SetOIDCIssuerProfileStatus(*infrav1.OIDCIssuerProfileStatus)
+ MakeClusterCA() *corev1.Secret
+ StoreClusterInfo(context.Context, []byte) error
}
// Service provides operations on azure resources.
diff --git a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go
index 671e2b18aad..1874e2c51f7 100644
--- a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go
+++ b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go
@@ -25,6 +25,7 @@ limitations under the License.
package mock_managedclusters
import (
+ context "context"
reflect "reflect"
azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore"
@@ -210,6 +211,20 @@ func (mr *MockManagedClusterScopeMockRecorder) IsAADEnabled() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsAADEnabled", reflect.TypeOf((*MockManagedClusterScope)(nil).IsAADEnabled))
}
+// MakeClusterCA mocks base method.
+func (m *MockManagedClusterScope) MakeClusterCA() *v1.Secret {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "MakeClusterCA")
+ ret0, _ := ret[0].(*v1.Secret)
+ return ret0
+}
+
+// MakeClusterCA indicates an expected call of MakeClusterCA.
+func (mr *MockManagedClusterScopeMockRecorder) MakeClusterCA() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MakeClusterCA", reflect.TypeOf((*MockManagedClusterScope)(nil).MakeClusterCA))
+}
+
// MakeEmptyKubeConfigSecret mocks base method.
func (m *MockManagedClusterScope) MakeEmptyKubeConfigSecret() v1.Secret {
m.ctrl.T.Helper()
@@ -310,6 +325,20 @@ func (mr *MockManagedClusterScopeMockRecorder) SetUserKubeconfigData(arg0 any) *
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUserKubeconfigData", reflect.TypeOf((*MockManagedClusterScope)(nil).SetUserKubeconfigData), arg0)
}
+// StoreClusterInfo mocks base method.
+func (m *MockManagedClusterScope) StoreClusterInfo(arg0 context.Context, arg1 []byte) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StoreClusterInfo", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// StoreClusterInfo indicates an expected call of StoreClusterInfo.
+func (mr *MockManagedClusterScopeMockRecorder) StoreClusterInfo(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreClusterInfo", reflect.TypeOf((*MockManagedClusterScope)(nil).StoreClusterInfo), arg0, arg1)
+}
+
// SubscriptionID mocks base method.
func (m *MockManagedClusterScope) SubscriptionID() string {
m.ctrl.T.Helper()
diff --git a/azure/services/natgateways/mock_natgateways/natgateways_mock.go b/azure/services/natgateways/mock_natgateways/natgateways_mock.go
index 75b9ae1cdef..72a4e11b2fa 100644
--- a/azure/services/natgateways/mock_natgateways/natgateways_mock.go
+++ b/azure/services/natgateways/mock_natgateways/natgateways_mock.go
@@ -30,6 +30,7 @@ import (
azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore"
v1api20220701 "github.com/Azure/azure-service-operator/v2/api/network/v1api20220701"
gomock "go.uber.org/mock/gomock"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
azure "sigs.k8s.io/cluster-api-provider-azure/azure"
v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -323,6 +324,20 @@ func (mr *MockNatGatewayScopeMockRecorder) GetClient() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClient", reflect.TypeOf((*MockNatGatewayScope)(nil).GetClient))
}
+// GetDeletionTimestamp mocks base method.
+func (m *MockNatGatewayScope) GetDeletionTimestamp() *v1.Time {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetDeletionTimestamp")
+ ret0, _ := ret[0].(*v1.Time)
+ return ret0
+}
+
+// GetDeletionTimestamp indicates an expected call of GetDeletionTimestamp.
+func (mr *MockNatGatewayScopeMockRecorder) GetDeletionTimestamp() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeletionTimestamp", reflect.TypeOf((*MockNatGatewayScope)(nil).GetDeletionTimestamp))
+}
+
// GetLongRunningOperationState mocks base method.
func (m *MockNatGatewayScope) GetLongRunningOperationState(arg0, arg1, arg2 string) *v1beta1.Future {
m.ctrl.T.Helper()
@@ -435,6 +450,20 @@ func (mr *MockNatGatewayScopeMockRecorder) NatGatewaySpecs() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NatGatewaySpecs", reflect.TypeOf((*MockNatGatewayScope)(nil).NatGatewaySpecs))
}
+// NodeResourceGroup mocks base method.
+func (m *MockNatGatewayScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockNatGatewayScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockNatGatewayScope)(nil).NodeResourceGroup))
+}
+
// NodeSubnets mocks base method.
func (m *MockNatGatewayScope) NodeSubnets() []v1beta1.SubnetSpec {
m.ctrl.T.Helper()
diff --git a/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go b/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go
index 53c3b4043c5..de024eab203 100644
--- a/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go
+++ b/azure/services/networkinterfaces/mock_networkinterfaces/networkinterfaces_mock.go
@@ -293,6 +293,20 @@ func (mr *MockNICScopeMockRecorder) NICSpecs() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NICSpecs", reflect.TypeOf((*MockNICScope)(nil).NICSpecs))
}
+// NodeResourceGroup mocks base method.
+func (m *MockNICScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockNICScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockNICScope)(nil).NodeResourceGroup))
+}
+
// ResourceGroup mocks base method.
func (m *MockNICScope) ResourceGroup() string {
m.ctrl.T.Helper()
diff --git a/azure/services/privatedns/mock_privatedns/privatedns_mock.go b/azure/services/privatedns/mock_privatedns/privatedns_mock.go
index 1ffac3f26d6..17520ff0a3e 100644
--- a/azure/services/privatedns/mock_privatedns/privatedns_mock.go
+++ b/azure/services/privatedns/mock_privatedns/privatedns_mock.go
@@ -279,6 +279,20 @@ func (mr *MockScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockScope)(nil).NodeResourceGroup))
+}
+
// PrivateDNSSpec mocks base method.
func (m *MockScope) PrivateDNSSpec() (azure.ResourceSpecGetter, []azure.ResourceSpecGetter, []azure.ResourceSpecGetter) {
m.ctrl.T.Helper()
diff --git a/azure/services/publicips/mock_publicips/publicips_mock.go b/azure/services/publicips/mock_publicips/publicips_mock.go
index 98b46eb3215..8acd5e526f8 100644
--- a/azure/services/publicips/mock_publicips/publicips_mock.go
+++ b/azure/services/publicips/mock_publicips/publicips_mock.go
@@ -279,6 +279,20 @@ func (mr *MockPublicIPScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockPublicIPScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockPublicIPScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockPublicIPScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockPublicIPScope)(nil).NodeResourceGroup))
+}
+
// PublicIPSpecs mocks base method.
func (m *MockPublicIPScope) PublicIPSpecs() []azure.ResourceSpecGetter {
m.ctrl.T.Helper()
diff --git a/azure/services/scalesets/mock_scalesets/scalesets_mock.go b/azure/services/scalesets/mock_scalesets/scalesets_mock.go
index 87723294ea6..00b76b13839 100644
--- a/azure/services/scalesets/mock_scalesets/scalesets_mock.go
+++ b/azure/services/scalesets/mock_scalesets/scalesets_mock.go
@@ -280,6 +280,20 @@ func (mr *MockScaleSetScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockScaleSetScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockScaleSetScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockScaleSetScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockScaleSetScope)(nil).NodeResourceGroup))
+}
+
// ReconcileReplicas mocks base method.
func (m *MockScaleSetScope) ReconcileReplicas(arg0 context.Context, arg1 *azure.VMSS) error {
m.ctrl.T.Helper()
diff --git a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go
index 8f458533f28..1a5db75b072 100644
--- a/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go
+++ b/azure/services/scalesetvms/mock_scalesetvms/scalesetvms_mock.go
@@ -279,6 +279,20 @@ func (mr *MockScaleSetVMScopeMockRecorder) Location() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockScaleSetVMScope)(nil).Location))
}
+// NodeResourceGroup mocks base method.
+func (m *MockScaleSetVMScope) NodeResourceGroup() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "NodeResourceGroup")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// NodeResourceGroup indicates an expected call of NodeResourceGroup.
+func (mr *MockScaleSetVMScopeMockRecorder) NodeResourceGroup() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockScaleSetVMScope)(nil).NodeResourceGroup))
+}
+
// ResourceGroup mocks base method.
func (m *MockScaleSetVMScope) ResourceGroup() string {
m.ctrl.T.Helper()
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml
index 98e3c44cfed..9766a60cf37 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml
@@ -381,6 +381,7 @@ spec:
enum:
- azure
- kubenet
+ - none
type: string
networkPluginMode:
description: NetworkPluginMode is the mode the network plugin should
diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go
index 8d45b62765f..d18fc5f3cca 100644
--- a/controllers/azurecluster_reconciler.go
+++ b/controllers/azurecluster_reconciler.go
@@ -174,6 +174,7 @@ func (s *azureClusterService) Delete(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "failed to get group service")
}
+
// Delete the entire resource group directly.
if err := groupSvc.Delete(ctx); err != nil {
return errors.Wrap(err, "failed to delete resource group")
@@ -191,15 +192,6 @@ func (s *azureClusterService) Delete(ctx context.Context) error {
return nil
}
-func (s *azureClusterService) getService(name string) (azure.ServiceReconciler, error) {
- for _, service := range s.services {
- if service.Name() == name {
- return service, nil
- }
- }
- return nil, errors.Errorf("service %s not found", name)
-}
-
// setFailureDomainsForLocation sets the AzureCluster Status failure domains based on which Azure Availability Zones are available in the cluster location.
// Note that this is not done in a webhook as it requires API calls to fetch the availability zones.
func (s *azureClusterService) setFailureDomainsForLocation(ctx context.Context) error {
@@ -220,3 +212,12 @@ func (s *azureClusterService) setFailureDomainsForLocation(ctx context.Context)
return nil
}
+
+func (s *azureClusterService) getService(name string) (azure.ServiceReconciler, error) {
+ for _, service := range s.services {
+ if service.Name() == name {
+ return service, nil
+ }
+ }
+ return nil, errors.Errorf("service %s not found", name)
+}
diff --git a/controllers/azurejson_machinepool_controller.go b/controllers/azurejson_machinepool_controller.go
index 7019785e405..c49e7192131 100644
--- a/controllers/azurejson_machinepool_controller.go
+++ b/controllers/azurejson_machinepool_controller.go
@@ -25,11 +25,9 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
- "sigs.k8s.io/cluster-api-provider-azure/azure/scope"
"sigs.k8s.io/cluster-api-provider-azure/azure/services/identities"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
@@ -138,38 +136,14 @@ func (r *AzureJSONMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl
log = log.WithValues("cluster", cluster.Name)
- _, kind := infrav1.GroupVersion.WithKind("AzureCluster").ToAPIVersionAndKind()
-
- // only look at azure clusters
if cluster.Spec.InfrastructureRef == nil {
log.Info("infra ref is nil")
return ctrl.Result{}, nil
}
- if cluster.Spec.InfrastructureRef.Kind != kind {
- log.WithValues("kind", cluster.Spec.InfrastructureRef.Kind).Info("infra ref was not an AzureCluster")
- return ctrl.Result{}, nil
- }
-
- // fetch the corresponding azure cluster
- azureCluster := &infrav1.AzureCluster{}
- azureClusterName := types.NamespacedName{
- Namespace: req.Namespace,
- Name: cluster.Spec.InfrastructureRef.Name,
- }
-
- if err := r.Get(ctx, azureClusterName, azureCluster); err != nil {
- log.Error(err, "failed to fetch AzureCluster")
- return reconcile.Result{}, err
- }
- // Create the scope.
- clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{
- Client: r.Client,
- Cluster: cluster,
- AzureCluster: azureCluster,
- })
+ clusterScope, err := GetClusterScoper(ctx, log, r.Client, cluster)
if err != nil {
- return reconcile.Result{}, errors.Wrap(err, "failed to create scope")
+ return reconcile.Result{}, errors.Wrapf(err, "failed to create cluster scope for cluster %s/%s", cluster.Namespace, cluster.Name)
}
// Construct secret for this machine
diff --git a/controllers/azurejson_machinepool_controller_test.go b/controllers/azurejson_machinepool_controller_test.go
index 59dcbd71479..709be82d739 100644
--- a/controllers/azurejson_machinepool_controller_test.go
+++ b/controllers/azurejson_machinepool_controller_test.go
@@ -137,7 +137,7 @@ func TestAzureJSONPoolReconciler(t *testing.T) {
azureMachinePool,
},
fail: true,
- err: "azureclusters.infrastructure.cluster.x-k8s.io \"my-azure-cluster\" not found",
+ err: "failed to create cluster scope for cluster /my-cluster: azureclusters.infrastructure.cluster.x-k8s.io \"my-azure-cluster\" not found",
},
"infra ref is nil": {
objects: []runtime.Object{
@@ -173,7 +173,8 @@ func TestAzureJSONPoolReconciler(t *testing.T) {
machinePool,
azureMachinePool,
},
- fail: false,
+ fail: true,
+ err: "failed to create cluster scope for cluster /my-cluster: unsupported infrastructure type \"FooCluster\", should be AzureCluster or AzureManagedCluster",
},
}
diff --git a/controllers/azuremanagedcontrolplane_reconciler.go b/controllers/azuremanagedcontrolplane_reconciler.go
index 0a0a8bd90de..707e4455532 100644
--- a/controllers/azuremanagedcontrolplane_reconciler.go
+++ b/controllers/azuremanagedcontrolplane_reconciler.go
@@ -21,6 +21,7 @@ import (
"fmt"
"github.com/pkg/errors"
+ "k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/cluster-api-provider-azure/azure"
"sigs.k8s.io/cluster-api-provider-azure/azure/scope"
"sigs.k8s.io/cluster-api-provider-azure/azure/services/groups"
@@ -156,9 +157,32 @@ func (r *azureManagedControlPlaneService) reconcileKubeconfig(ctx context.Contex
}
return nil
}); err != nil {
- return errors.Wrap(err, "failed to kubeconfig secret for cluster")
+ return errors.Wrap(err, "failed to reconcile kubeconfig secret for cluster")
}
}
+ // store cluster-info for the cluster with the admin kubeconfig.
+ kubeconfigFile, err := clientcmd.Load(kubeConfigs[0])
+ if err != nil {
+ return errors.Wrap(err, "failed to turn aks credentials into kubeconfig file struct")
+ }
+
+ cluster := kubeconfigFile.Contexts[kubeconfigFile.CurrentContext].Cluster
+ caData := kubeconfigFile.Clusters[cluster].CertificateAuthorityData
+ caSecret := r.scope.MakeClusterCA()
+ if _, err := controllerutil.CreateOrUpdate(ctx, r.kubeclient, caSecret, func() error {
+ caSecret.Data = map[string][]byte{
+ secret.TLSCrtDataName: caData,
+ secret.TLSKeyDataName: []byte("foo"),
+ }
+ return nil
+ }); err != nil {
+ return errors.Wrapf(err, "failed to reconcile certificate authority data secret for cluster")
+ }
+
+ if err := r.scope.StoreClusterInfo(ctx, caData); err != nil {
+ return errors.Wrap(err, "failed to construct cluster-info")
+ }
+
return nil
}
diff --git a/controllers/helpers.go b/controllers/helpers.go
index f0399a84091..abb1fd87a13 100644
--- a/controllers/helpers.go
+++ b/controllers/helpers.go
@@ -76,6 +76,12 @@ type (
controller.Options
Cache *coalescing.ReconcileCache
}
+
+ // ClusterScoper is an interface used by AzureMachinePools that can be owned by either an AzureManagedCluster or an AzureCluster.
+ ClusterScoper interface {
+ azure.ClusterScoper
+ groups.GroupScope
+ }
)
// AzureClusterToAzureMachinesMapper creates a mapping handler to transform AzureClusters into AzureMachines. The transform
@@ -599,15 +605,18 @@ func GetAzureMachinePoolByName(ctx context.Context, c client.Client, namespace,
// ShouldDeleteIndividualResources returns false if the resource group is managed and the whole cluster is being deleted
// meaning that we can rely on a single resource group delete operation as opposed to deleting every individual VM resource.
-func ShouldDeleteIndividualResources(ctx context.Context, clusterScope *scope.ClusterScope) bool {
+func ShouldDeleteIndividualResources(ctx context.Context, cluster ClusterScoper) bool {
ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.ShouldDeleteIndividualResources")
defer done()
- if clusterScope.Cluster.DeletionTimestamp.IsZero() {
+ if cluster.GetDeletionTimestamp().IsZero() {
return true
}
- return groups.New(clusterScope).ShouldDeleteIndividualResources(ctx)
+ managed, err := groups.New(cluster).IsManaged(ctx)
+ // Since this is a best effort attempt to speed up delete, we don't fail the delete if we can't get the RG status.
+ // Instead, take the long way and delete all resources one by one.
+ return err != nil || !managed
}
// GetClusterIdentityFromRef returns the AzureClusterIdentity referenced by the AzureCluster.
@@ -1061,3 +1070,49 @@ func ClusterUpdatePauseChange(logger logr.Logger) predicate.Funcs {
func ClusterPauseChangeAndInfrastructureReady(log logr.Logger) predicate.Funcs {
return predicates.Any(log, predicates.ClusterCreateInfraReady(log), predicates.ClusterUpdateInfraReady(log), ClusterUpdatePauseChange(log))
}
+
+// GetClusterScoper returns a ClusterScoper for the given cluster using the infra ref pointing to either an AzureCluster or an AzureManagedCluster.
+func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, cluster *clusterv1.Cluster) (ClusterScoper, error) {
+ infraRef := cluster.Spec.InfrastructureRef
+ switch infraRef.Kind {
+ case "AzureCluster":
+ logger = logger.WithValues("AzureCluster", infraRef.Name)
+ azureClusterName := client.ObjectKey{
+ Namespace: infraRef.Namespace,
+ Name: infraRef.Name,
+ }
+ azureCluster := &infrav1.AzureCluster{}
+ if err := c.Get(ctx, azureClusterName, azureCluster); err != nil {
+ logger.V(2).Info("AzureCluster is not available yet")
+ return nil, err
+ }
+
+ // Create the cluster scope
+ return scope.NewClusterScope(ctx, scope.ClusterScopeParams{
+ Client: c,
+ Cluster: cluster,
+ AzureCluster: azureCluster,
+ })
+
+ case "AzureManagedCluster":
+ logger = logger.WithValues("AzureManagedCluster", infraRef.Name)
+ azureManagedControlPlaneName := client.ObjectKey{
+ Namespace: infraRef.Namespace,
+ Name: cluster.Spec.ControlPlaneRef.Name,
+ }
+ azureManagedControlPlane := &infrav1.AzureManagedControlPlane{}
+ if err := c.Get(ctx, azureManagedControlPlaneName, azureManagedControlPlane); err != nil {
+ logger.V(2).Info("AzureManagedControlPlane is not available yet")
+ return nil, err
+ }
+
+ // Create the control plane scope
+ return scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{
+ Client: c,
+ Cluster: cluster,
+ ControlPlane: azureManagedControlPlane,
+ })
+ }
+
+ return nil, errors.Errorf("unsupported infrastructure type %q, should be AzureCluster or AzureManagedCluster", cluster.Spec.InfrastructureRef.Kind)
+}
diff --git a/docs/book/src/topics/managedcluster.md b/docs/book/src/topics/managedcluster.md
index c8ca75a5f1e..c7d6c519ac2 100644
--- a/docs/book/src/topics/managedcluster.md
+++ b/docs/book/src/topics/managedcluster.md
@@ -28,14 +28,11 @@ executing clusterctl.
# Kubernetes values
export CLUSTER_NAME="my-cluster"
export WORKER_MACHINE_COUNT=2
-export KUBERNETES_VERSION="v1.25.6"
+export KUBERNETES_VERSION="v1.27.3"
# Azure values
export AZURE_LOCATION="southcentralus"
export AZURE_RESOURCE_GROUP="${CLUSTER_NAME}"
-# set AZURE_SUBSCRIPTION_ID to the GUID of your subscription
-# this example uses an sdk authentication file and parses the subscriptionId with jq
-# this file may be created using
```
Create a new service principal and save to a local file:
@@ -303,18 +300,6 @@ implementation simple. If you'd like to run managed AKS cluster with CAPZ
and need an additional feature, please open a pull request or issue with
details. We're happy to help!
-Current limitations
-
-- DNS IP is hardcoded to the x.x.x.10 inside the service CIDR.
- - primarily due to lack of validation, see [#612](https://github.com/kubernetes-sigs/cluster-api-provider-azure/issues/612)
-- Only supports system managed identities.
- - We would like to support user managed identities where appropriate.
-- Only supports Standard load balancer (SLB).
- - We will not support Basic load balancer in CAPZ. SLB is generally
- the path forward in Azure.
-- Only supports Azure Active Directory Managed by Azure.
- - We will not support Legacy Azure Active Directory
-
## Best Practices
A set of best practices for managing AKS clusters is documented here: https://learn.microsoft.com/azure/aks/best-practices
@@ -398,3 +383,130 @@ spec:
namespace: default
version: v1.21.2
```
+
+## Joining self-managed VMSS nodes to an AKS control plane
+
+### Creating the MachinePool
+
+You can add a self-managed VMSS node pool to any CAPZ-managed AKS cluster by applying the following resources to the management cluster:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+ name: ${CLUSTER_NAME}-vmss
+ namespace: default
+spec:
+ clusterName: ${CLUSTER_NAME}
+ replicas: ${WORKER_MACHINE_COUNT}
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfig
+ name: ${CLUSTER_NAME}-vmss
+ clusterName: ${CLUSTER_NAME}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: AzureMachinePool
+ name: ${CLUSTER_NAME}-vmss
+ version: ${KUBERNETES_VERSION}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: AzureMachinePool
+metadata:
+ name: ${CLUSTER_NAME}-vmss
+ namespace: default
+spec:
+ location: ${AZURE_LOCATION}
+ strategy:
+ rollingUpdate:
+ deletePolicy: Oldest
+ maxSurge: 25%
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ osDisk:
+ diskSizeGB: 30
+ managedDisk:
+ storageAccountType: Premium_LRS
+ osType: Linux
+ sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
+ vmSize: ${AZURE_NODE_MACHINE_TYPE}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfig
+metadata:
+ name: ${CLUSTER_NAME}-vmss
+ namespace: default
+spec:
+ files:
+ - contentFrom:
+ secret:
+ key: worker-node-azure.json
+ name: ${CLUSTER_NAME}-vmss-azure-json
+ owner: root:root
+ path: /etc/kubernetes/azure.json
+ permissions: "0644"
+ - contentFrom:
+ secret:
+ key: value
+ name: ${CLUSTER_NAME}-kubeconfig
+ owner: root:root
+ path: /etc/kubernetes/admin.conf
+ permissions: "0644"
+ joinConfiguration:
+ discovery:
+ file:
+ kubeConfigPath: /etc/kubernetes/admin.conf
+ nodeRegistration:
+ kubeletExtraArgs:
+ azure-container-registry-config: /etc/kubernetes/azure.json
+ cloud-provider: external
+ name: '{{ ds.meta_data["local_hostname"] }}'
+ preKubeadmCommands:
+ - kubeadm init phase upload-config all
+```
+
+### Installing Addons
+
+In order for the nodes to become ready, you'll need to install Cloud Provider Azure and a CNI.
+
+AKS will install Cloud Provider Azure on the self-managed nodes as long as they have the appropriate labels. You can add the required label on the nodes by running the following command on the AKS cluster:
+
+```bash
+kubectl label node <node-name> kubernetes.azure.com/cluster=<node-resource-group-name>
+```
+
+Repeat this for each node in the MachinePool.
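+
+If the pool has several nodes, a short loop can apply the label to every node that is still missing it. This is a minimal sketch; `<node-resource-group-name>` is a placeholder for your cluster's node resource group name:
+
+```bash
+# Select nodes that do not yet carry the AKS cluster label and apply it.
+for node in $(kubectl get nodes -l '!kubernetes.azure.com/cluster' -o name); do
+  kubectl label "${node}" kubernetes.azure.com/cluster=<node-resource-group-name>
+done
+```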
+
+For the CNI, you can install the CNI of your choice. For example, to install Azure CNI, run the following command on the AKS cluster:
+
+```bash
+kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/azure-cni-v1.yaml
+```
+
+### Notes
+
+Some notes about how this works under the hood:
+
+- CAPZ will fetch the kubeconfig for the AKS cluster and store it in a secret named `${CLUSTER_NAME}-kubeconfig` in the management cluster. That secret is then used for discovery by the `KubeadmConfig` resource. A quick check for this secret is sketched after these notes.
+- You can customize the `MachinePool`, `AzureMachinePool`, and `KubeadmConfig` resources to your liking. The example above is just a starting point. Note that the key configurations to keep are in the `KubeadmConfig` resource, namely the `files`, `joinConfiguration`, and `preKubeadmCommands` sections.
+- The `KubeadmConfig` resource will be used to generate a `kubeadm join` command that will be executed on each node in the VMSS. It uses the cluster kubeconfig for discovery. The `kubeadm init phase upload-config all` command is run as a preKubeadmCommand to ensure that the kubeadm and kubelet configurations are uploaded to a ConfigMap. This step would normally be done by `kubeadm init`, but since we're not running `kubeadm init`, we need to do it manually.
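+
+As a quick sanity check (a sketch, assuming `CLUSTER_NAME` is exported and the cluster lives in the `default` namespace), you can confirm that CAPZ has created the discovery kubeconfig secret on the management cluster:
+
+```bash
+# Print the first lines of the AKS kubeconfig that CAPZ stored for the cluster.
+kubectl get secret "${CLUSTER_NAME}-kubeconfig" -n default -o jsonpath='{.data.value}' | base64 -d | head -n 5
+```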
diff --git a/exp/api/v1beta1/azuremachinepool_default.go b/exp/api/v1beta1/azuremachinepool_default.go
index d2baa14b173..e4009be4bcb 100644
--- a/exp/api/v1beta1/azuremachinepool_default.go
+++ b/exp/api/v1beta1/azuremachinepool_default.go
@@ -37,22 +37,9 @@ func (amp *AzureMachinePool) SetDefaults(client client.Client) error {
errs = append(errs, errors.Wrap(err, "failed to set default SSH public key"))
}
- machinePool, err := azureutil.FindParentMachinePoolWithRetry(amp.Name, client, 5)
- if err != nil {
- errs = append(errs, errors.Wrap(err, "failed to find parent machine pool"))
+ if err := amp.SetIdentityDefaults(client); err != nil {
+ errs = append(errs, errors.Wrap(err, "failed to set managed identity defaults"))
}
-
- ownerAzureClusterName, ownerAzureClusterNamespace, err := infrav1.GetOwnerAzureClusterNameAndNamespace(client, machinePool.Spec.ClusterName, machinePool.Namespace, 5)
- if err != nil {
- errs = append(errs, errors.Wrap(err, "failed to get owner cluster"))
- }
-
- subscriptionID, err := infrav1.GetSubscriptionID(client, ownerAzureClusterName, ownerAzureClusterNamespace, 5)
- if err != nil {
- errs = append(errs, errors.Wrap(err, "failed to get subscription ID"))
- }
-
- amp.SetIdentityDefaults(subscriptionID)
amp.SetDiagnosticsDefaults()
amp.SetNetworkInterfacesDefaults()
@@ -73,14 +60,29 @@ func (amp *AzureMachinePool) SetDefaultSSHPublicKey() error {
}
// SetIdentityDefaults sets the defaults for VMSS Identity.
-func (amp *AzureMachinePool) SetIdentityDefaults(subscriptionID string) {
+func (amp *AzureMachinePool) SetIdentityDefaults(client client.Client) error {
// Ensure the deprecated fields and new fields are not populated simultaneously
if amp.Spec.RoleAssignmentName != "" && amp.Spec.SystemAssignedIdentityRole != nil && amp.Spec.SystemAssignedIdentityRole.Name != "" {
// Both the deprecated and the new fields are both set, return without changes
// and reject the request in the validating webhook which runs later.
- return
+ return nil
}
if amp.Spec.Identity == infrav1.VMIdentitySystemAssigned {
+ machinePool, err := azureutil.FindParentMachinePoolWithRetry(amp.Name, client, 5)
+ if err != nil {
+ return errors.Wrap(err, "failed to find parent machine pool")
+ }
+
+ ownerAzureClusterName, ownerAzureClusterNamespace, err := infrav1.GetOwnerAzureClusterNameAndNamespace(client, machinePool.Spec.ClusterName, machinePool.Namespace, 5)
+ if err != nil {
+ return errors.Wrap(err, "failed to get owner cluster")
+ }
+
+ subscriptionID, err := infrav1.GetSubscriptionID(client, ownerAzureClusterName, ownerAzureClusterNamespace, 5)
+ if err != nil {
+ return errors.Wrap(err, "failed to get subscription ID")
+ }
+
if amp.Spec.SystemAssignedIdentityRole == nil {
amp.Spec.SystemAssignedIdentityRole = &infrav1.SystemAssignedIdentityRole{}
}
@@ -99,6 +101,7 @@ func (amp *AzureMachinePool) SetIdentityDefaults(subscriptionID string) {
amp.Spec.SystemAssignedIdentityRole.DefinitionID = fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Authorization/roleDefinitions/%s", subscriptionID, infrav1.ContributorRoleID)
}
}
+ return nil
}
// SetSpotEvictionPolicyDefaults sets the defaults for the spot VM eviction policy.
diff --git a/exp/api/v1beta1/azuremachinepool_default_test.go b/exp/api/v1beta1/azuremachinepool_default_test.go
index 3a373ba19ec..f36802d8b4f 100644
--- a/exp/api/v1beta1/azuremachinepool_default_test.go
+++ b/exp/api/v1beta1/azuremachinepool_default_test.go
@@ -22,9 +22,14 @@ import (
"github.com/google/uuid"
. "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
)
func TestAzureMachinePool_SetDefaultSSHPublicKey(t *testing.T) {
@@ -48,75 +53,141 @@ func TestAzureMachinePool_SetDefaultSSHPublicKey(t *testing.T) {
}
func TestAzureMachinePool_SetIdentityDefaults(t *testing.T) {
- g := NewWithT(t)
-
- type test struct {
- machinePool *AzureMachinePool
- }
-
fakeSubscriptionID := uuid.New().String()
fakeClusterName := "testcluster"
fakeRoleDefinitionID := "testroledefinitionid"
fakeScope := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s", fakeSubscriptionID, fakeClusterName)
existingRoleAssignmentName := "42862306-e485-4319-9bf0-35dbc6f6fe9c"
- roleAssignmentExistTest := test{machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
- Identity: infrav1.VMIdentitySystemAssigned,
- SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
- Name: existingRoleAssignmentName,
+
+ tests := []struct {
+ name string
+ machinePool *AzureMachinePool
+ wantErr bool
+ expectedRoleAssignmentName string
+ expectedSystemAssignedIdentityRole *infrav1.SystemAssignedIdentityRole
+ }{
+ {
+ name: "bothRoleAssignmentNamesPopulated",
+ machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
+ Identity: infrav1.VMIdentitySystemAssigned,
+ RoleAssignmentName: existingRoleAssignmentName,
+ SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ },
+ }},
+ expectedRoleAssignmentName: existingRoleAssignmentName,
+ expectedSystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ },
},
- }}}
- notSystemAssignedTest := test{machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
- Identity: infrav1.VMIdentityUserAssigned,
- }}}
- systemAssignedIdentityRoleExistTest := test{machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
- Identity: infrav1.VMIdentitySystemAssigned,
- SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
- DefinitionID: fakeRoleDefinitionID,
- Scope: fakeScope,
+ {
+ name: "roleAssignmentExist",
+ machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
+ Identity: infrav1.VMIdentitySystemAssigned,
+ SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ },
+ }},
+ expectedSystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ DefinitionID: fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Authorization/roleDefinitions/%s", fakeSubscriptionID, infrav1.ContributorRoleID),
+ Scope: fmt.Sprintf("/subscriptions/%s/", fakeSubscriptionID),
+ },
},
- }}}
- deprecatedRoleAssignmentNameTest := test{machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
- Identity: infrav1.VMIdentitySystemAssigned,
- RoleAssignmentName: existingRoleAssignmentName,
- }}}
- emptyTest := test{machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
- Identity: infrav1.VMIdentitySystemAssigned,
- SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{},
- }}}
-
- bothRoleAssignmentNamesPopulatedTest := test{machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
- Identity: infrav1.VMIdentitySystemAssigned,
- RoleAssignmentName: existingRoleAssignmentName,
- SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
- Name: existingRoleAssignmentName,
+ {
+ name: "notSystemAssigned",
+ machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
+ Identity: infrav1.VMIdentityUserAssigned,
+ }},
+ expectedSystemAssignedIdentityRole: nil,
},
- }}}
-
- bothRoleAssignmentNamesPopulatedTest.machinePool.SetIdentityDefaults(fakeSubscriptionID)
- g.Expect(bothRoleAssignmentNamesPopulatedTest.machinePool.Spec.RoleAssignmentName).To(Equal(existingRoleAssignmentName))
- g.Expect(bothRoleAssignmentNamesPopulatedTest.machinePool.Spec.SystemAssignedIdentityRole.Name).To(Equal(existingRoleAssignmentName))
-
- roleAssignmentExistTest.machinePool.SetIdentityDefaults(fakeSubscriptionID)
- g.Expect(roleAssignmentExistTest.machinePool.Spec.SystemAssignedIdentityRole.Name).To(Equal(existingRoleAssignmentName))
-
- notSystemAssignedTest.machinePool.SetIdentityDefaults(fakeSubscriptionID)
- g.Expect(notSystemAssignedTest.machinePool.Spec.SystemAssignedIdentityRole).To(BeNil())
-
- systemAssignedIdentityRoleExistTest.machinePool.SetIdentityDefaults(fakeSubscriptionID)
- g.Expect(systemAssignedIdentityRoleExistTest.machinePool.Spec.SystemAssignedIdentityRole.Scope).To(Equal(fakeScope))
- g.Expect(systemAssignedIdentityRoleExistTest.machinePool.Spec.SystemAssignedIdentityRole.DefinitionID).To(Equal(fakeRoleDefinitionID))
+ {
+ name: "systemAssignedIdentityRoleExist",
+ machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
+ Identity: infrav1.VMIdentitySystemAssigned,
+ SystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ DefinitionID: fakeRoleDefinitionID,
+ Scope: fakeScope,
+ },
+ }},
+ expectedSystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ DefinitionID: fakeRoleDefinitionID,
+ Scope: fakeScope,
+ },
+ },
+ {
+ name: "deprecatedRoleAssignmentName",
+ machinePool: &AzureMachinePool{Spec: AzureMachinePoolSpec{
+ Identity: infrav1.VMIdentitySystemAssigned,
+ RoleAssignmentName: existingRoleAssignmentName,
+ }},
+ expectedSystemAssignedIdentityRole: &infrav1.SystemAssignedIdentityRole{
+ Name: existingRoleAssignmentName,
+ DefinitionID: fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Authorization/roleDefinitions/%s", fakeSubscriptionID, infrav1.ContributorRoleID),
+ Scope: fmt.Sprintf("/subscriptions/%s/", fakeSubscriptionID),
+ },
+ },
+ }
- deprecatedRoleAssignmentNameTest.machinePool.SetIdentityDefaults(fakeSubscriptionID)
- g.Expect(deprecatedRoleAssignmentNameTest.machinePool.Spec.SystemAssignedIdentityRole.Name).To(Equal(existingRoleAssignmentName))
- g.Expect(deprecatedRoleAssignmentNameTest.machinePool.Spec.RoleAssignmentName).To(BeEmpty())
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ g := NewWithT(t)
- emptyTest.machinePool.SetIdentityDefaults(fakeSubscriptionID)
- g.Expect(emptyTest.machinePool.Spec.SystemAssignedIdentityRole.Name).To(Not(BeEmpty()))
- _, err := uuid.Parse(emptyTest.machinePool.Spec.SystemAssignedIdentityRole.Name)
- g.Expect(err).To(Not(HaveOccurred()))
- g.Expect(emptyTest.machinePool.Spec.SystemAssignedIdentityRole).To(Not(BeNil()))
- g.Expect(emptyTest.machinePool.Spec.SystemAssignedIdentityRole.Scope).To(Equal(fmt.Sprintf("/subscriptions/%s/", fakeSubscriptionID)))
- g.Expect(emptyTest.machinePool.Spec.SystemAssignedIdentityRole.DefinitionID).To(Equal(fmt.Sprintf("/subscriptions/%s/providers/Microsoft.Authorization/roleDefinitions/%s", fakeSubscriptionID, infrav1.ContributorRoleID)))
+ scheme := runtime.NewScheme()
+ _ = AddToScheme(scheme)
+ _ = infrav1.AddToScheme(scheme)
+ _ = clusterv1.AddToScheme(scheme)
+ _ = expv1.AddToScheme(scheme)
+
+ machinePool := &expv1.MachinePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pool1",
+ Namespace: "default",
+ Labels: map[string]string{
+ clusterv1.ClusterNameLabel: "testcluster",
+ },
+ },
+ Spec: expv1.MachinePoolSpec{
+ ClusterName: "testcluster",
+ },
+ }
+ azureCluster := &infrav1.AzureCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "testcluster",
+ Namespace: "default",
+ },
+ Spec: infrav1.AzureClusterSpec{
+ AzureClusterClassSpec: infrav1.AzureClusterClassSpec{
+ SubscriptionID: fakeSubscriptionID,
+ },
+ },
+ }
+ cluster := &clusterv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "testcluster",
+ Namespace: "default",
+ },
+ Spec: clusterv1.ClusterSpec{
+ InfrastructureRef: &corev1.ObjectReference{
+ Name: "testcluster",
+ Namespace: "default",
+ },
+ },
+ }
+
+ fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.machinePool, machinePool, azureCluster, cluster).Build()
+ err := tc.machinePool.SetIdentityDefaults(fakeClient)
+ if tc.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ } else {
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(tc.machinePool.Spec.RoleAssignmentName).To(Equal(tc.expectedRoleAssignmentName))
+ g.Expect(tc.machinePool.Spec.SystemAssignedIdentityRole).To(Equal(tc.expectedSystemAssignedIdentityRole))
+ }
+ })
+ }
}
func TestAzureMachinePool_SetDiagnosticsDefaults(t *testing.T) {
diff --git a/exp/controllers/azuremachinepool_controller.go b/exp/controllers/azuremachinepool_controller.go
index e565fb1c305..4bd132afc6c 100644
--- a/exp/controllers/azuremachinepool_controller.go
+++ b/exp/controllers/azuremachinepool_controller.go
@@ -97,11 +97,15 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg
r = coalescing.NewReconciler(ampr, options.Cache, log)
}
- // create mapper to transform incoming AzureClusters into AzureMachinePool requests
+ // create mappers to transform incoming AzureClusters and AzureManagedClusters into AzureMachinePool requests
azureClusterMapper, err := AzureClusterToAzureMachinePoolsMapper(ctx, ampr.Client, mgr.GetScheme(), log)
if err != nil {
return errors.Wrapf(err, "failed to create AzureCluster to AzureMachinePools mapper")
}
+ azureManagedClusterMapper, err := AzureManagedClusterToAzureMachinePoolsMapper(ctx, ampr.Client, mgr.GetScheme(), log)
+ if err != nil {
+ return errors.Wrapf(err, "failed to create AzureManagedCluster to AzureMachinePools mapper")
+ }
c, err := ctrl.NewControllerManagedBy(mgr).
WithOptions(options.Options).
@@ -117,6 +121,11 @@ func (ampr *AzureMachinePoolReconciler) SetupWithManager(ctx context.Context, mg
&infrav1.AzureCluster{},
handler.EnqueueRequestsFromMapFunc(azureClusterMapper),
).
+ // watch for changes in AzureManagedControlPlane resources
+ Watches(
+ &infrav1.AzureManagedControlPlane{},
+ handler.EnqueueRequestsFromMapFunc(azureManagedClusterMapper),
+ ).
// watch for changes in KubeadmConfig to sync bootstrap token
Watches(
&kubeadmv1.KubeadmConfig{},
@@ -210,25 +219,9 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.
logger = logger.WithValues("cluster", cluster.Name)
- logger = logger.WithValues("AzureCluster", cluster.Spec.InfrastructureRef.Name)
- azureClusterName := client.ObjectKey{
- Namespace: azMachinePool.Namespace,
- Name: cluster.Spec.InfrastructureRef.Name,
- }
- azureCluster := &infrav1.AzureCluster{}
- if err := ampr.Client.Get(ctx, azureClusterName, azureCluster); err != nil {
- logger.V(2).Info("AzureCluster is not available yet")
- return reconcile.Result{}, nil
- }
-
- // Create the cluster scope
- clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{
- Client: ampr.Client,
- Cluster: cluster,
- AzureCluster: azureCluster,
- })
+ clusterScope, err := infracontroller.GetClusterScoper(ctx, logger, ampr.Client, cluster)
if err != nil {
- return reconcile.Result{}, err
+ return reconcile.Result{}, errors.Wrapf(err, "failed to create cluster scope for cluster %s/%s", cluster.Namespace, cluster.Name)
}
// Create the machine pool scope
@@ -239,7 +232,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.
ClusterScope: clusterScope,
})
if err != nil {
- return reconcile.Result{}, errors.Wrap(err, "failed to create scope")
+ return reconcile.Result{}, errors.Wrap(err, "failed to create machinepool scope")
}
// Always close the scope when exiting this function so we can persist any AzureMachine changes.
@@ -252,7 +245,7 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.
// Return early if the object or Cluster is paused.
if annotations.IsPaused(cluster, azMachinePool) {
logger.V(2).Info("AzureMachinePool or linked Cluster is marked as paused. Won't reconcile normally")
- return ampr.reconcilePause(ctx, machinePoolScope, clusterScope)
+ return ampr.reconcilePause(ctx, machinePoolScope)
}
// Handle deleted machine pools
@@ -261,10 +254,10 @@ func (ampr *AzureMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.
}
// Handle non-deleted machine pools
- return ampr.reconcileNormal(ctx, machinePoolScope, clusterScope)
+ return ampr.reconcileNormal(ctx, machinePoolScope, cluster)
}
-func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
+func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, machinePoolScope *scope.MachinePoolScope, cluster *clusterv1.Cluster) (_ reconcile.Result, reterr error) {
ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileNormal")
defer done()
@@ -283,7 +276,7 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac
}
}
- if !clusterScope.Cluster.Status.InfrastructureReady {
+ if !cluster.Status.InfrastructureReady {
log.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
@@ -356,7 +349,7 @@ func (ampr *AzureMachinePoolReconciler) reconcileNormal(ctx context.Context, mac
return reconcile.Result{}, nil
}
-func (ampr *AzureMachinePoolReconciler) reconcilePause(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
+func (ampr *AzureMachinePoolReconciler) reconcilePause(ctx context.Context, machinePoolScope *scope.MachinePoolScope) (reconcile.Result, error) {
ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcilePause")
defer done()
@@ -368,13 +361,13 @@ func (ampr *AzureMachinePoolReconciler) reconcilePause(ctx context.Context, mach
}
if err := amps.Pause(ctx); err != nil {
- return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachinePool %s/%s", clusterScope.Namespace(), machinePoolScope.Name())
+ return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachinePool %s/%s", machinePoolScope.AzureMachinePool.Namespace, machinePoolScope.Name())
}
return reconcile.Result{}, nil
}
-func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
+func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, machinePoolScope *scope.MachinePoolScope, clusterScope infracontroller.ClusterScoper) (reconcile.Result, error) {
ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureMachinePoolReconciler.reconcileDelete")
defer done()
@@ -388,7 +381,7 @@ func (ampr *AzureMachinePoolReconciler) reconcileDelete(ctx context.Context, mac
log.V(4).Info("deleting AzureMachinePool resource individually")
if err := amps.Delete(ctx); err != nil {
- return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachinePool %s/%s", clusterScope.Namespace(), machinePoolScope.Name())
+ return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureMachinePool %s/%s", machinePoolScope.AzureMachinePool.Namespace, machinePoolScope.Name())
}
}
diff --git a/exp/controllers/azuremachinepool_controller_test.go b/exp/controllers/azuremachinepool_controller_test.go
index f2d7f06024e..14b8c6e3ed6 100644
--- a/exp/controllers/azuremachinepool_controller_test.go
+++ b/exp/controllers/azuremachinepool_controller_test.go
@@ -90,6 +90,7 @@ func TestAzureMachinePoolReconcilePaused(t *testing.T) {
Spec: clusterv1.ClusterSpec{
Paused: true,
InfrastructureRef: &corev1.ObjectReference{
+ Kind: "AzureCluster",
Name: name,
Namespace: namespace,
},
diff --git a/exp/controllers/azuremachinepoolmachine_controller.go b/exp/controllers/azuremachinepoolmachine_controller.go
index 6c5af492d94..39cca8a31b9 100644
--- a/exp/controllers/azuremachinepoolmachine_controller.go
+++ b/exp/controllers/azuremachinepoolmachine_controller.go
@@ -186,27 +186,9 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r
return ctrl.Result{}, nil
}
- azureClusterName := client.ObjectKey{
- Namespace: machine.Namespace,
- Name: cluster.Spec.InfrastructureRef.Name,
- }
-
- azureCluster := &infrav1.AzureCluster{}
- if err := ampmr.Client.Get(ctx, azureClusterName, azureCluster); err != nil {
- logger.Info("AzureCluster is not available yet")
- return reconcile.Result{}, nil
- }
-
- logger = logger.WithValues("AzureCluster", azureCluster.Name)
-
- // Create the cluster scope
- clusterScope, err := scope.NewClusterScope(ctx, scope.ClusterScopeParams{
- Client: ampmr.Client,
- Cluster: cluster,
- AzureCluster: azureCluster,
- })
+ clusterScope, err := infracontroller.GetClusterScoper(ctx, logger, ampmr.Client, cluster)
if err != nil {
- return reconcile.Result{}, err
+ return reconcile.Result{}, errors.Wrapf(err, "failed to create cluster scope for cluster %s/%s", cluster.Namespace, cluster.Name)
}
// Create the machine pool scope
@@ -233,7 +215,7 @@ func (ampmr *AzureMachinePoolMachineController) Reconcile(ctx context.Context, r
return ampmr.reconcileDelete(ctx, machineScope)
}
- if !clusterScope.Cluster.Status.InfrastructureReady {
+ if !cluster.Status.InfrastructureReady {
logger.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
diff --git a/exp/controllers/azuremachinepoolmachine_controller_test.go b/exp/controllers/azuremachinepoolmachine_controller_test.go
index 1bd1dabca30..7f4c4d176bc 100644
--- a/exp/controllers/azuremachinepoolmachine_controller_test.go
+++ b/exp/controllers/azuremachinepoolmachine_controller_test.go
@@ -120,6 +120,9 @@ func TestAzureMachinePoolMachineReconciler_Reconcile(t *testing.T) {
func getAReadyMachinePoolMachineCluster() (*clusterv1.Cluster, *infrav1.AzureCluster, *expv1.MachinePool, *infrav1exp.AzureMachinePool, *infrav1exp.AzureMachinePoolMachine) {
azCluster := &infrav1.AzureCluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AzureCluster",
+ },
ObjectMeta: metav1.ObjectMeta{
Name: "azCluster1",
Namespace: "default",
@@ -132,13 +135,18 @@ func getAReadyMachinePoolMachineCluster() (*clusterv1.Cluster, *infrav1.AzureClu
}
cluster := &clusterv1.Cluster{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Cluster",
+ },
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
Namespace: "default",
},
Spec: clusterv1.ClusterSpec{
InfrastructureRef: &corev1.ObjectReference{
- Name: azCluster.Name,
+ Name: azCluster.Name,
+ Namespace: "default",
+ Kind: "AzureCluster",
},
},
Status: clusterv1.ClusterStatus{
@@ -147,6 +155,9 @@ func getAReadyMachinePoolMachineCluster() (*clusterv1.Cluster, *infrav1.AzureClu
}
mp := &expv1.MachinePool{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
ObjectMeta: metav1.ObjectMeta{
Name: "mp1",
Namespace: "default",
@@ -157,6 +168,9 @@ func getAReadyMachinePoolMachineCluster() (*clusterv1.Cluster, *infrav1.AzureClu
}
amp := &infrav1exp.AzureMachinePool{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "AzureMachinePool",
+ },
ObjectMeta: metav1.ObjectMeta{
Name: "amp1",
Namespace: "default",
diff --git a/exp/controllers/helpers.go b/exp/controllers/helpers.go
index 1dbcb4b9a42..c0866be8edb 100644
--- a/exp/controllers/helpers.go
+++ b/exp/controllers/helpers.go
@@ -97,6 +97,59 @@ func AzureClusterToAzureMachinePoolsMapper(ctx context.Context, c client.Client,
}, nil
}
+// AzureManagedClusterToAzureMachinePoolsMapper creates a mapping handler to transform AzureManagedClusters into AzureMachinePools. The transform
+// requires AzureManagedCluster to map to the owning Cluster, then from the Cluster, collects the MachinePools belonging to the cluster,
+// and finally projects the infrastructure reference to each AzureMachinePool.
+func AzureManagedClusterToAzureMachinePoolsMapper(ctx context.Context, c client.Client, scheme *runtime.Scheme, log logr.Logger) (handler.MapFunc, error) {
+ gvk, err := apiutil.GVKForObject(new(infrav1exp.AzureMachinePool), scheme)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to find GVK for AzureMachinePool")
+ }
+
+ return func(ctx context.Context, o client.Object) []ctrl.Request {
+ ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
+ defer cancel()
+
+ azCluster, ok := o.(*infrav1.AzureManagedCluster)
+ if !ok {
+ log.Error(errors.Errorf("expected an AzureManagedCluster, got %T instead", o.GetObjectKind()), "failed to map AzureManagedCluster")
+ return nil
+ }
+
+ log = log.WithValues("AzureManagedCluster", azCluster.Name, "Namespace", azCluster.Namespace)
+
+ // Don't handle deleted AzureManagedCluster
+ if !azCluster.ObjectMeta.DeletionTimestamp.IsZero() {
+ log.V(4).Info("AzureManagedCluster has a deletion timestamp, skipping mapping.")
+ return nil
+ }
+
+ clusterName, ok := controllers.GetOwnerClusterName(azCluster.ObjectMeta)
+ if !ok {
+ log.V(4).Info("unable to get the owner cluster")
+ return nil
+ }
+
+ machineList := &expv1.MachinePoolList{}
+ machineList.SetGroupVersionKind(gvk)
+ // list all of the requested objects within the cluster namespace with the cluster name label
+ if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil {
+ log.V(4).Info(fmt.Sprintf("unable to list machine pools in cluster %s", clusterName))
+ return nil
+ }
+
+ mapFunc := MachinePoolToInfrastructureMapFunc(gvk, log)
+ var results []ctrl.Request
+ for _, machine := range machineList.Items {
+ m := machine
+ azureMachines := mapFunc(ctx, &m)
+ results = append(results, azureMachines...)
+ }
+
+ return results
+ }, nil
+}
+
// AzureMachinePoolMachineMapper creates a mapping handler to transform AzureMachinePoolMachine to AzureMachinePools.
func AzureMachinePoolMachineMapper(scheme *runtime.Scheme, log logr.Logger) handler.MapFunc {
return func(ctx context.Context, o client.Object) []ctrl.Request {
diff --git a/go.mod b/go.mod
index cc1c4ac9d6e..018a64375c7 100644
--- a/go.mod
+++ b/go.mod
@@ -44,9 +44,11 @@ require (
golang.org/x/crypto v0.14.0
golang.org/x/mod v0.13.0
golang.org/x/text v0.13.0
+ gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.27.2
k8s.io/apimachinery v0.27.2
k8s.io/client-go v0.27.2
+ k8s.io/cluster-bootstrap v0.27.2
k8s.io/component-base v0.27.2
k8s.io/klog/v2 v2.90.1
k8s.io/kubectl v0.27.2
@@ -192,12 +194,10 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.27.2 // indirect
k8s.io/apiserver v0.27.2 // indirect
k8s.io/cli-runtime v0.27.2 // indirect
k8s.io/cloud-provider v0.27.1 // indirect
- k8s.io/cluster-bootstrap v0.27.2 // indirect
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.2 // indirect
diff --git a/test/e2e/aks.go b/test/e2e/aks.go
index 1c61eda53e7..6ba7aa3c966 100644
--- a/test/e2e/aks.go
+++ b/test/e2e/aks.go
@@ -44,11 +44,18 @@ type DiscoverAndWaitForAKSControlPlaneInput struct {
// This will be invoked by cluster api e2e framework.
func WaitForAKSControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
client := input.ClusterProxy.GetClient()
+ cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
+ Getter: client,
+ Name: input.ClusterName,
+ Namespace: input.Namespace,
+ })
+
DiscoverAndWaitForAKSControlPlaneInitialized(ctx, DiscoverAndWaitForAKSControlPlaneInput{
Lister: client,
Getter: client,
Cluster: result.Cluster,
}, input.WaitForControlPlaneIntervals...)
+ InstallCNIManifest(ctx, input, cluster.Spec.ClusterNetwork.Services.CIDRBlocks, true)
}
// WaitForAKSControlPlaneReady waits for the azure managed control plane to be ready.
diff --git a/test/e2e/aks_byo_node.go b/test/e2e/aks_byo_node.go
new file mode 100644
index 00000000000..7f52044151f
--- /dev/null
+++ b/test/e2e/aks_byo_node.go
@@ -0,0 +1,190 @@
+//go:build e2e
+// +build e2e
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "context"
+ "os"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
+ infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+ infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
+ expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+ "sigs.k8s.io/cluster-api/util/conditions"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type AKSBYONodeSpecInput struct {
+ Cluster *clusterv1.Cluster
+ KubernetesVersion string
+ WaitIntervals []interface{}
+ ExpectedWorkerNodes int32
+}
+
+func AKSBYONodeSpec(ctx context.Context, inputGetter func() AKSBYONodeSpecInput) {
+ input := inputGetter()
+
+ mgmtClient := bootstrapClusterProxy.GetClient()
+ Expect(mgmtClient).NotTo(BeNil())
+
+ infraControlPlane := &infrav1.AzureManagedControlPlane{}
+ err := mgmtClient.Get(ctx, client.ObjectKey{Namespace: input.Cluster.Spec.ControlPlaneRef.Namespace, Name: input.Cluster.Spec.ControlPlaneRef.Name}, infraControlPlane)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("Creating a self-managed machine pool with 2 nodes")
+ infraMachinePool := &infrav1exp.AzureMachinePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "byo-pool",
+ Namespace: input.Cluster.Namespace,
+ },
+ Spec: infrav1exp.AzureMachinePoolSpec{
+ Location: infraControlPlane.Spec.Location,
+ Template: infrav1exp.AzureMachinePoolMachineTemplate{
+ VMSize: os.Getenv("AZURE_NODE_MACHINE_TYPE"),
+ },
+ },
+ }
+ err = mgmtClient.Create(ctx, infraMachinePool)
+ Expect(err).NotTo(HaveOccurred())
+
+ kubeadmConfig := &bootstrapv1.KubeadmConfig{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: infraMachinePool.Namespace,
+ Name: infraMachinePool.Name,
+ },
+ Spec: bootstrapv1.KubeadmConfigSpec{
+ Files: []bootstrapv1.File{
+ {
+ ContentFrom: &bootstrapv1.FileSource{
+ Secret: bootstrapv1.SecretFileSource{
+ Name: infraMachinePool.Name + "-azure-json",
+ Key: "worker-node-azure.json",
+ },
+ },
+ Path: "/etc/kubernetes/azure.json",
+ Permissions: "0644",
+ Owner: "root:root",
+ },
+ {
+ ContentFrom: &bootstrapv1.FileSource{
+ Secret: bootstrapv1.SecretFileSource{
+ Name: input.Cluster.Name + "-kubeconfig",
+ Key: "value",
+ },
+ },
+ Path: "/etc/kubernetes/admin.conf",
+ Permissions: "0644",
+ Owner: "root:root",
+ },
+ },
+ JoinConfiguration: &bootstrapv1.JoinConfiguration{
+ Discovery: bootstrapv1.Discovery{
+ File: &bootstrapv1.FileDiscovery{
+ KubeConfigPath: "/etc/kubernetes/admin.conf",
+ },
+ },
+ NodeRegistration: bootstrapv1.NodeRegistrationOptions{
+ Name: "{{ ds.meta_data[\"local_hostname\"] }}",
+ KubeletExtraArgs: map[string]string{
+ "cloud-provider": "external",
+ "azure-container-registry-config": "/etc/kubernetes/azure.json",
+ },
+ },
+ },
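+ // Run `kubeadm init phase upload-config all` before joining: it uploads the kubeadm and kubelet configuration to a ConfigMap, which `kubeadm init` would normally do but is skipped here because the control plane is AKS-managed.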
+ PreKubeadmCommands: []string{"kubeadm init phase upload-config all"},
+ },
+ }
+ err = mgmtClient.Create(ctx, kubeadmConfig)
+ Expect(err).NotTo(HaveOccurred())
+
+ machinePool := &expv1.MachinePool{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: infraMachinePool.Namespace,
+ Name: infraMachinePool.Name,
+ },
+ Spec: expv1.MachinePoolSpec{
+ ClusterName: input.Cluster.Name,
+ Replicas: ptr.To[int32](2),
+ Template: clusterv1.MachineTemplateSpec{
+ Spec: clusterv1.MachineSpec{
+ Bootstrap: clusterv1.Bootstrap{
+ ConfigRef: &corev1.ObjectReference{
+ APIVersion: bootstrapv1.GroupVersion.String(),
+ Kind: "KubeadmConfig",
+ Name: kubeadmConfig.Name,
+ },
+ },
+ ClusterName: input.Cluster.Name,
+ InfrastructureRef: corev1.ObjectReference{
+ APIVersion: infrav1.GroupVersion.String(),
+ Kind: "AzureMachinePool",
+ Name: infraMachinePool.Name,
+ },
+ Version: ptr.To(input.KubernetesVersion),
+ },
+ },
+ },
+ }
+ err = mgmtClient.Create(ctx, machinePool)
+ Expect(err).NotTo(HaveOccurred())
+
+ By("creating a Kubernetes client to the workload cluster")
+ workloadClusterProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Spec.ControlPlaneRef.Namespace, input.Cluster.Spec.ControlPlaneRef.Name)
+ Expect(workloadClusterProxy).NotTo(BeNil())
+ clientset := workloadClusterProxy.GetClientSet()
+ Expect(clientset).NotTo(BeNil())
+
+ By("Verifying the bootstrap succeeded")
+ Eventually(func(g Gomega) {
+ pool := &infrav1exp.AzureMachinePool{}
+ err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(infraMachinePool), pool)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(conditions.IsTrue(pool, infrav1.BootstrapSucceededCondition)).To(BeTrue())
+ }, input.WaitIntervals...).Should(Succeed())
+
+ By("Adding the expected AKS labels to the nodes")
+ // TODO: move this to the MachinePool object once MachinePools support label propagation
+ Eventually(func(g Gomega) {
+ nodeList, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(int32(len(nodeList.Items))).To(Equal(input.ExpectedWorkerNodes + 2))
+ for i, node := range nodeList.Items {
+ if _, ok := node.Labels["kubernetes.azure.com/cluster"]; !ok {
+ node.Labels["kubernetes.azure.com/cluster"] = infraControlPlane.Spec.NodeResourceGroupName
+ _, err := clientset.CoreV1().Nodes().Update(ctx, &nodeList.Items[i], metav1.UpdateOptions{})
+ g.Expect(err).NotTo(HaveOccurred())
+ }
+ }
+ }, input.WaitIntervals...).Should(Succeed())
+
+ By("Verifying the MachinePool becomes ready")
+ Eventually(func(g Gomega) {
+ pool := &expv1.MachinePool{}
+ err := mgmtClient.Get(ctx, client.ObjectKeyFromObject(machinePool), pool)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(conditions.IsTrue(pool, clusterv1.ReadyCondition)).To(BeTrue())
+ }, input.WaitIntervals...).Should(Succeed())
+}
diff --git a/test/e2e/aks_public_ip_prefix.go b/test/e2e/aks_public_ip_prefix.go
index 69f36483e5b..735539ef37a 100644
--- a/test/e2e/aks_public_ip_prefix.go
+++ b/test/e2e/aks_public_ip_prefix.go
@@ -21,6 +21,7 @@ package e2e
import (
"context"
+ "os"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
@@ -89,7 +90,7 @@ func AKSPublicIPPrefixSpec(ctx context.Context, inputGetter func() AKSPublicIPPr
},
Spec: infrav1.AzureManagedMachinePoolSpec{
Mode: "User",
- SKU: "Standard_D2s_v3",
+ SKU: os.Getenv("AZURE_NODE_MACHINE_TYPE"),
EnableNodePublicIP: ptr.To(true),
NodePublicIPPrefixID: ptr.To("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroupName + "/providers/Microsoft.Network/publicipprefixes/" + *publicIPPrefix.Name),
},
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 1150bd5fb7e..02cbab7c7e8 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -737,6 +737,7 @@ var _ = Describe("Workload cluster creation", func() {
clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
specName,
withFlavor("aks"),
+ withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
withNamespace(namespace.Name),
withClusterName(clusterName),
withKubernetesVersion(kubernetesVersionUpgradeFrom),
@@ -843,6 +844,17 @@ var _ = Describe("Workload cluster creation", func() {
}
})
})
+
+ By("creating a byo nodepool", func() {
+ AKSBYONodeSpec(ctx, func() AKSBYONodeSpecInput {
+ return AKSBYONodeSpecInput{
+ Cluster: result.Cluster,
+ KubernetesVersion: kubernetesVersion,
+ WaitIntervals: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+ ExpectedWorkerNodes: result.ExpectedWorkerNodes(),
+ }
+ })
+ })
})
})
diff --git a/test/e2e/cni.go b/test/e2e/cni.go
index 131f166afd0..5bb485d8676 100644
--- a/test/e2e/cni.go
+++ b/test/e2e/cni.go
@@ -78,8 +78,10 @@ func EnsureCalicoIsReady(ctx context.Context, input clusterctl.ApplyCustomCluste
By("Copying kubeadm config map to calico-system namespace")
workloadClusterClient := clusterProxy.GetClient()
- // Copy the kubeadm configmap to the calico-system namespace. This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace.
- CopyConfigMap(ctx, input, workloadClusterClient, kubeadmConfigMapName, kubesystem, CalicoSystemNamespace)
+ if hasWindows {
+ // Copy the kubeadm configmap to the calico-system namespace. This is a workaround needed for the calico-node-windows daemonset to be able to run in the calico-system namespace.
+ CopyConfigMap(ctx, input, workloadClusterClient, kubeadmConfigMapName, kubesystem, CalicoSystemNamespace)
+ }
By("Waiting for Ready tigera-operator deployment pods")
for _, d := range []string{"tigera-operator"} {