diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml
index 1790ed7014..394b2e626d 100644
--- a/test/e2e/data/e2e_conf.yaml
+++ b/test/e2e/data/e2e_conf.yaml
@@ -137,6 +137,7 @@ providers:
       - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-limit-az.yaml"
       - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-machine-pool.yaml"
       - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-md-remediation.yaml"
+      - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-multi-az.yaml"
       - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-nested-multitenancy.yaml"
       - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-remote-management-cluster.yaml"
       - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-simple-multitenancy.yaml"
diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go
index 8bc736565f..5342a5cfc7 100644
--- a/test/e2e/shared/defaults.go
+++ b/test/e2e/shared/defaults.go
@@ -50,6 +50,7 @@ const (
 	AwsNodeMachineType   = "AWS_NODE_MACHINE_TYPE"
 	AwsAvailabilityZone1 = "AWS_AVAILABILITY_ZONE_1"
 	AwsAvailabilityZone2 = "AWS_AVAILABILITY_ZONE_2"
+	MultiAzFlavor        = "multi-az"
 	LimitAzFlavor        = "limit-az"
 	SpotInstancesFlavor  = "spot-instances"
 	SSMFlavor            = "ssm"
diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go
index b4f838b31f..dc1c9bff7b 100644
--- a/test/e2e/suites/unmanaged/helpers_test.go
+++ b/test/e2e/suites/unmanaged/helpers_test.go
@@ -388,6 +388,35 @@ func getEvents(namespace string) *corev1.EventList {
 	return eventsList
 }
 
+func getSubnetID(filterKey, filterValue, clusterName string) *string {
+	var subnetOutput *ec2.DescribeSubnetsOutput
+	var err error
+
+	ec2Client := ec2.New(e2eCtx.AWSSession)
+	subnetInput := &ec2.DescribeSubnetsInput{
+		Filters: []*ec2.Filter{
+			{
+				Name: aws.String(filterKey),
+				Values: []*string{
+					aws.String(filterValue),
+				},
+			},
+			{
+				Name:   aws.String("tag-key"),
+				Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/" + clusterName}),
+			},
+		},
+	}
+
+	Eventually(func() int {
+		subnetOutput, err = ec2Client.DescribeSubnets(subnetInput)
+		Expect(err).NotTo(HaveOccurred())
+		return len(subnetOutput.Subnets)
+	}, e2eCtx.E2EConfig.GetIntervals("", "wait-infra-subnets")...).Should(Equal(1))
+
+	return subnetOutput.Subnets[0].SubnetId
+}
+
 func getVolumeIds(info statefulSetInfo, k8sclient crclient.Client) []*string {
 	ginkgo.By("Retrieving IDs of dynamically provisioned volumes.")
 	statefulset := &appsv1.StatefulSet{}
diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go
index a26ce2831c..cc073cd1d5 100644
--- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go
+++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go
@@ -22,6 +22,7 @@ package unmanaged
 import (
 	"context"
 	"fmt"
+	"os"
 	"path/filepath"
 	"strings"
 	"time"
@@ -583,6 +584,51 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		})
 	})
 
+	ginkgo.Describe("Workload cluster in multiple AZs", func() {
+		ginkgo.It("It should be creatable and deletable", func() {
+			specName := "functional-test-multi-az"
+			requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
+			requiredResources.WriteRequestedResources(e2eCtx, specName)
+			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
+			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
+			ginkgo.By("Creating a cluster")
+			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
+			configCluster := defaultConfigCluster(clusterName, namespace.Name)
+			configCluster.ControlPlaneMachineCount = pointer.Int64(3)
+			configCluster.Flavor = shared.MultiAzFlavor
+			cluster, _, _ := createCluster(ctx, configCluster, result)
+
+			ginkgo.By("Adding worker nodes to additional subnets")
+			mdName1 := clusterName + "-md-1"
+			mdName2 := clusterName + "-md-2"
+			az1 := os.Getenv(shared.AwsAvailabilityZone1)
+			az2 := os.Getenv(shared.AwsAvailabilityZone2)
+			md1 := makeMachineDeployment(namespace.Name, mdName1, clusterName, &az1, 1)
+			md2 := makeMachineDeployment(namespace.Name, mdName2, clusterName, &az2, 1)
+
+			// private CIDRs set in cluster-template-multi-az.yaml.
+			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
+				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
+				MachineDeployment:       md1,
+				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName1),
+				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, mdName1, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), getSubnetID("cidr-block", "10.0.0.0/24", clusterName)),
+			})
+			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
+				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
+				MachineDeployment:       md2,
+				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName2),
+				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, mdName2, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), getSubnetID("cidr-block", "10.0.2.0/24", clusterName)),
+			})
+
+			ginkgo.By("Waiting for new worker nodes to become ready")
+			k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
+			framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md1}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
+			framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md2}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
+		})
+	})
+
 	// TODO @randomvariable: Await more resources
 	ginkgo.PDescribe("Multiple workload clusters", func() {
 		ginkgo.Context("in different namespaces with machine failures", func() {
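
Note on the helper in helpers_test.go: getSubnetID retries DescribeSubnets on the "wait-infra-subnets" interval until exactly one subnet matches both the caller-supplied filter and the cluster ownership tag key, then returns its ID. Requiring exactly one match makes the lookup fail loudly on ambiguity instead of silently picking an arbitrary subnet. A minimal standalone sketch of the same lookup, assuming default AWS credentials and a placeholder cluster name "my-cluster" (illustrative only, not part of this change):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	client := ec2.New(session.Must(session.NewSession()))

	out, err := client.DescribeSubnets(&ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{
			// Same filter pair as getSubnetID: match the subnet CIDR...
			{Name: aws.String("cidr-block"), Values: aws.StringSlice([]string{"10.0.0.0/24"})},
			// ...and require the CAPA cluster ownership tag key ("my-cluster" is a placeholder).
			{Name: aws.String("tag-key"), Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/my-cluster"})},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	if n := len(out.Subnets); n != 1 {
		log.Fatalf("expected exactly one matching subnet, got %d", n)
	}
	fmt.Println(aws.StringValue(out.Subnets[0].SubnetId))
}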
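
Two prerequisites fall outside the diff itself: AWS_AVAILABILITY_ZONE_1 and AWS_AVAILABILITY_ZONE_2 must be set in the environment (they become the failure domains for the md-1 and md-2 machine deployments), and the e2e config is expected to define a "wait-infra-subnets" interval for the Eventually loop in getSubnetID. An entry along the lines of default/wait-infra-subnets: ["5m", "30s"] would match the intervals format e2e_conf.yaml already uses, though those exact values are an assumption rather than part of this change; the 10.0.0.0/24 and 10.0.2.0/24 lookups rely on the private CIDRs declared in cluster-template-multi-az.yaml.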