From aa4bcdc837ffdf0fd264f29fe19065382b2fcc0e Mon Sep 17 00:00:00 2001
From: Richard Case
Date: Thu, 17 Oct 2024 07:40:51 +0100
Subject: [PATCH] feat: update e2e test config and docs

This updates the Kubernetes versions used in the e2e tests to versions
that we have AMIs for. It also marks some of the old CSI tests as
pending. A few minor docs updates have been made as a result of review.

Signed-off-by: Richard Case
---
 docs/book/src/topics/images/built-amis.md    |   4 +-
 test/e2e/data/e2e_conf.yaml                  |  10 +-
 test/e2e/suites/unmanaged/helpers_test.go    |   2 +-
 .../unmanaged/unmanaged_functional_test.go   | 149 +++++++++++-------
 4 files changed, 98 insertions(+), 67 deletions(-)

diff --git a/docs/book/src/topics/images/built-amis.md b/docs/book/src/topics/images/built-amis.md
index b3903eea62..767dfb33d5 100644
--- a/docs/book/src/topics/images/built-amis.md
+++ b/docs/book/src/topics/images/built-amis.md
@@ -9,7 +9,7 @@ New AMIs are built on a best effort basis when a new Kubernetes version is relea
 - When there is a new k8s release series then any AMIs no longer covered by the previous point will be deleted. For example, when v1.31.0 is published then any AMIs for the v1.28 release series will be deleted.
 - Existing AMIs are not updated for security fixes and it is recommended to always use the latest patch version for the Kubernetes version you want to run.
 
-> NOTE: As the old community images where located in an AWS account that the project no longer has access to and because those AMIs have been automatically deleted we have started publishing images again from v1.29.9
+> NOTE: As the old community images were located in an AWS account that the project no longer has access to and because those AMIs have been automatically deleted, we have started publishing images again starting from Kubernetes v1.29.9.
 
 ## Finding AMIs
 
@@ -21,7 +21,7 @@ If you are using a version of clusterawsadm prior to v2.6.2 then you will need t
 - Ubuntu (ubuntu-22.04, ubuntu-24.04)
 - Flatcar (flatcar-stable)
 
-> Note: Centos (centos-7) and Amazon Linux 2 (amazon-2) where supported but there are some issues with the AMI build that need fixing.
+> Note: Centos (centos-7) and Amazon Linux 2 (amazon-2) were supported, but there are some issues with the AMI build that need fixing. See this [issue](https://github.com/kubernetes-sigs/cluster-api-provider-aws/issues/5142) for details.
 
 ## Supported AWS Regions
 - ap-northeast-1
diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml
index 3a998b9974..612e780fd8 100644
--- a/test/e2e/data/e2e_conf.yaml
+++ b/test/e2e/data/e2e_conf.yaml
@@ -176,10 +176,10 @@ variables:
   # allowing the same e2e config file to be re-used in different Prow jobs e.g. each one with a K8s version permutation.
   # The following Kubernetes versions should be the latest versions with already published kindest/node images.
   # This avoids building node images in the default case which improves the test duration significantly.
-  KUBERNETES_VERSION_MANAGEMENT: "v1.30.2"
-  KUBERNETES_VERSION: "v1.26.6"
-  KUBERNETES_VERSION_UPGRADE_TO: "v1.26.6"
-  KUBERNETES_VERSION_UPGRADE_FROM: "v1.25.12"
+  KUBERNETES_VERSION_MANAGEMENT: "v1.29.8"
+  KUBERNETES_VERSION: "v1.29.9"
+  KUBERNETES_VERSION_UPGRADE_TO: "v1.29.9"
+  KUBERNETES_VERSION_UPGRADE_FROM: "v1.29.8"
   # Pre and post 1.23 Kubernetes versions are being used for CSI upgrade tests
   PRE_1_23_KUBERNETES_VERSION: "v1.22.17"
   POST_1_23_KUBERNETES_VERSION: "v1.23.15"
@@ -190,7 +190,7 @@ variables:
   AWS_NODE_MACHINE_TYPE: t3.large
   AWS_MACHINE_TYPE_VCPU_USAGE: 2
   AWS_SSH_KEY_NAME: "cluster-api-provider-aws-sigs-k8s-io"
-  CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "v1.29.1"
+  CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "v1.29.9"
   CONFORMANCE_WORKER_MACHINE_COUNT: "5"
   CONFORMANCE_CONTROL_PLANE_MACHINE_COUNT: "3"
   ETCD_VERSION_UPGRADE_TO: "3.5.11-0"
diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go
index e2a132dae2..0e1cd4c354 100644
--- a/test/e2e/suites/unmanaged/helpers_test.go
+++ b/test/e2e/suites/unmanaged/helpers_test.go
@@ -529,7 +529,7 @@ func makeJoinBootstrapConfigTemplate(namespace, name string) *bootstrapv1.Kubead
 			JoinConfiguration: &bootstrapv1.JoinConfiguration{
 				NodeRegistration: bootstrapv1.NodeRegistrationOptions{
 					Name:             "{{ ds.meta_data.local_hostname }}",
-					KubeletExtraArgs: map[string]string{"cloud-provider": "aws"},
+					KubeletExtraArgs: map[string]string{"cloud-provider": "external"},
 				},
 			},
 		},
diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go
index 5a87a9039e..6cca7cfb20 100644
--- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go
+++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go
@@ -65,11 +65,13 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 	ginkgo.Describe("Workload cluster with EFS driver", func() {
 		ginkgo.It("should pass dynamic provisioning test", func() {
 			specName := "functional-efs-support"
-			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, "efs-support-test")
+			if !e2eCtx.Settings.SkipQuotas {
+				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, "efs-support-test")
 
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 
 			Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
 			Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion))
@@ -119,12 +121,15 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		ginkgo.It("should create cluster with single worker", func() {
 			ginkgo.Skip("Args field of clusterctl.ApplyClusterTemplateAndWaitInput was removed, need to add support for server-side filtering.")
 			specName := "functional-gpu-cluster"
-			// Change the multiplier for EC2GPU if GPU type is changed. g4dn.xlarge uses 2 vCPU
-			requiredResources = &shared.TestResource{EC2GPU: 2 * 2, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
-			requiredResources.WriteRequestedResources(e2eCtx, "gpu-test")
 			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
-			Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
-			defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				// Change the multiplier for EC2GPU if GPU type is changed. g4dn.xlarge uses 2 vCPU
+				requiredResources = &shared.TestResource{EC2GPU: 2 * 2, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
+				requiredResources.WriteRequestedResources(e2eCtx, "gpu-test")
+
+				Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
+				defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 
 			ginkgo.By("Creating cluster with a single worker")
 			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
@@ -169,10 +174,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 		ginkgo.It("should create cluster with nested assumed role", func() {
 			// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
specName := "functional-multitenancy-nested" - requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) Expect(shared.SetMultitenancyEnvVars(e2eCtx.AWSSession)).To(Succeed()) @@ -244,11 +251,13 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Context("in same namespace", func() { ginkgo.It("should create the clusters", func() { specName := "upgrade-to-main-branch-k8s" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 3, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, "upgrade-to-master-test") - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 3, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, "upgrade-to-master-test") + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) ginkgo.By("Creating first cluster with single control plane") cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) @@ -300,10 +309,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.PDescribe("CSI=in-tree CCM=in-tree AWSCSIMigration=off: upgrade to v1.23", func() { ginkgo.It("should create volumes dynamically with in tree CSI driver and in tree cloud provider", func() { specName := "csimigration-off-upgrade" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), 
flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) @@ -366,13 +377,15 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) }) - ginkgo.Describe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() { + ginkgo.PDescribe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() { ginkgo.It("should create volumes dynamically with external CSI driver and in tree cloud provider", func() { specName := "only-csi-external-upgrade" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) ginkgo.By("Creating first cluster with single control plane") @@ -436,13 +449,15 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) }) - ginkgo.Describe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() { + ginkgo.PDescribe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() { ginkgo.It("should create volumes dynamically with external CSI driver and external cloud provider", func() { specName := "csi-ccm-external-upgrade" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + 
Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) @@ -509,10 +524,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Describe("Workload cluster with AWS SSM Parameter as the Secret Backend", func() { ginkgo.It("should be creatable and deletable", func() { specName := "functional-test-ssm-parameter-store" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) @@ -543,10 +560,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Describe("MachineDeployment misconfigurations", func() { ginkgo.It("MachineDeployment misconfigurations", func() { specName := "functional-test-md-misconfigurations" - requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) ginkgo.By("Creating a cluster") @@ -594,10 +613,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Describe("Workload cluster in multiple AZs", func() { ginkgo.It("It should be creatable and deletable", func() { specName := "functional-test-multi-az" - requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, 
IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) ginkgo.By("Creating a cluster") @@ -649,10 +670,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Context("in different namespaces with machine failures", func() { ginkgo.It("should setup namespaces correctly for the two clusters", func() { specName := "functional-test-multi-namespace" - requiredResources = &shared.TestResource{EC2Normal: 4 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 4 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } ginkgo.By("Creating first cluster with single control plane") ns1, cf1 := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ @@ -732,10 +755,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Context("Defining clusters in the same namespace", func() { specName := "functional-test-multi-cluster-single-namespace" ginkgo.It("should create the clusters", func() { - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, 
ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) ginkgo.By("Creating first cluster with single control plane") @@ -760,10 +785,12 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { ginkgo.Describe("Workload cluster with spot instances", func() { ginkgo.It("should be creatable and deletable", func() { specName := "functional-test-spot-instances" - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) + } namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx) defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) ginkgo.By("Creating a cluster") @@ -809,9 +836,11 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { // Some infrastructure creation was moved to a setup node to better organize the test. ginkgo.JustBeforeEach(func() { - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 5, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + if !e2eCtx.Settings.SkipQuotas { + requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 5, EventBridgeRules: 50} + requiredResources.WriteRequestedResources(e2eCtx, specName) + Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) + } namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) ginkgo.By("Creating the management cluster infrastructure") mgmtClusterInfra.New(shared.AWSInfrastructureSpec{ @@ -839,7 +868,9 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { // Infrastructure cleanup is done in setup node so it is not bypassed if there is a test failure in the subject node. 
 		ginkgo.JustAfterEach(func() {
-			shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			if !e2eCtx.Settings.SkipQuotas {
+				shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
+			}
 			shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
 			if !e2eCtx.Settings.SkipCleanup {
 				ginkgo.By("Deleting peering connection")
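
Reviewer note: every spec above now wraps its resource accounting in the same guard, so the quota bookkeeping is skipped entirely when SkipQuotas is set (presumably for runs against accounts that do not use the shared quota file). For reference, the repeated pattern has this shape. This is a representative fragment assembled from the hunks above; the resource counts vary per spec, it relies on the suite's shared helpers, and it is not runnable on its own:

	if !e2eCtx.Settings.SkipQuotas {
		// Record what this spec intends to consume from the shared AWS account.
		requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, EventBridgeRules: 50}
		requiredResources.WriteRequestedResources(e2eCtx, specName)
		// Acquire the budget under the flock-guarded quota file; the spec fails if it cannot be granted.
		Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
		// Hand the budget back when the spec exits.
		defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))
	}

Setup/teardown nodes (JustBeforeEach/JustAfterEach) split the same guard across the acquire and release sides, which is why the release in the final hunk is wrapped in its own SkipQuotas check.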