From 6a4fc9bf5904718aa34bbae6693266548e15bfbd Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 2 Dec 2024 12:59:26 +0100 Subject: [PATCH 1/3] e2e: add NodeDrain e2e test --- test/e2e/config/vsphere.yaml | 2 + test/e2e/node_drain_test.go | 376 +++++++++++++++++++++++++++++++++++ 2 files changed, 378 insertions(+) create mode 100644 test/e2e/node_drain_test.go diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 80b23b5256..66fa38552b 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -345,6 +345,8 @@ intervals: default/wait-nodes-ready: ["10m", "10s"] default/wait-machine-remediation: ["15m", "10s"] mhc-remediation/mhc-remediation: ["30m", "10s"] + node-drain/wait-control-plane: ["15m", "10s"] node-drain/wait-deployment-available: ["3m", "10s"] node-drain/wait-machine-deleted: ["2m", "10s"] + node-drain/wait-statefulset-available: ["3m", "10s"] anti-affinity/wait-vm-redistribution: ["5m", "10s"] diff --git a/test/e2e/node_drain_test.go b/test/e2e/node_drain_test.go new file mode 100644 index 0000000000..58d2d83f4d --- /dev/null +++ b/test/e2e/node_drain_test.go @@ -0,0 +1,376 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("When testing Node drain [supervisor] [PR-Blocking]", func() { + const specName = "node-drain" // copied from CAPI + Setup(specName, func(testSpecificSettingsGetter func() testSettings) { + capi_e2e.NodeDrainTimeoutSpec(ctx, func() capi_e2e.NodeDrainTimeoutSpecInput { + return capi_e2e.NodeDrainTimeoutSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: ptr.To(testSpecificSettingsGetter().FlavorForMode("topology")), + InfrastructureProvider: ptr.To("vsphere"), + PostNamespaceCreated: testSpecificSettingsGetter().PostNamespaceCreatedFunc, + // Add verification for CSI blocking volume detachments. 
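+ // VerifyNodeVolumeDetach makes the upstream CAPI spec additionally verify that volumes get
+ // detached from Nodes before the corresponding Machines are removed. CreateAdditionalResources
+ // deploys StatefulSets backed by PVCs and scales the vSphere CSI controller down so that
+ // volume detach is blocked during drain; UnblockNodeVolumeDetachment scales it back up.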
+ VerifyNodeVolumeDetach: true, + CreateAdditionalResources: deployStatefulSetAndBlockCSI, + UnblockNodeVolumeDetachment: unblockNodeVolumeDetachment, + } + }) + }) +}) + +func deployStatefulSetAndBlockCSI(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster) { + controlplane := framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{ + Lister: bootstrapClusterProxy.GetClient(), + Cluster: cluster, + }, e2eConfig.GetIntervals("node-drain", "wait-control-plane")...) + + mds := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{ + Lister: bootstrapClusterProxy.GetClient(), + Cluster: cluster, + }, e2eConfig.GetIntervals("node-drain", "wait-worker-nodes")...) + + // This label will be added to all Machines so we can later create Pods on the right Nodes. + nodeOwnerLabelKey := "owner.node.cluster.x-k8s.io" + + workloadClusterProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name) + + By("Deploy a storageclass for CSI") + err := workloadClusterProxy.GetClient().Create(ctx, &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sts-pvc", + }, + Provisioner: "csi.vsphere.vmware.com", + ReclaimPolicy: ptr.To(corev1.PersistentVolumeReclaimDelete), + }) + if !apierrors.IsAlreadyExists(err) { + Expect(err).ToNot(HaveOccurred()) + } + + By("Deploy StatefulSets with evictable Pods without finalizer on control plane and MachineDeployment Nodes.") + deployEvictablePod(ctx, deployEvictablePodInput{ + WorkloadClusterProxy: workloadClusterProxy, + ControlPlane: controlplane, + StatefulSetName: "sts-cp", + Namespace: "evictable-workload", + NodeSelector: map[string]string{nodeOwnerLabelKey: "KubeadmControlPlane-" + controlplane.Name}, + WaitForStatefulSetAvailableInterval: e2eConfig.GetIntervals("node-drain", "wait-statefulset-available"), + }) + for _, md := range mds { + deployEvictablePod(ctx, deployEvictablePodInput{ + WorkloadClusterProxy: workloadClusterProxy, + MachineDeployment: md, + StatefulSetName: fmt.Sprintf("sts-%s", md.Name), + Namespace: "evictable-workload", + NodeSelector: map[string]string{nodeOwnerLabelKey: "MachineDeployment-" + md.Name}, + WaitForStatefulSetAvailableInterval: e2eConfig.GetIntervals("node-drain", "wait-statefulset-available"), + }) + } + + By("Scaling down the CSI controller to block lifecycle of PVC's") + csiController := &appsv1.Deployment{} + csiControllerKey := client.ObjectKey{ + Namespace: "vmware-system-csi", + Name: "vsphere-csi-controller", + } + Expect(workloadClusterProxy.GetClient().Get(ctx, csiControllerKey, csiController)).To(Succeed()) + patchHelper, err := patch.NewHelper(csiController, workloadClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred()) + csiController.Spec.Replicas = ptr.To[int32](0) + Expect(patchHelper.Patch(ctx, csiController)).To(Succeed()) + waitForDeploymentScaledDown(ctx, workloadClusterProxy.GetClient(), csiControllerKey, e2eConfig.GetIntervals("node-drain", "wait-deployment-available")...) 
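+ // With the CSI controller scaled to zero, detach operations for the StatefulSet volumes are no
+ // longer processed, so the subsequent Node drains are expected to block on volume detachment
+ // until unblockNodeVolumeDetachment scales the controller back up.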
+} + +func unblockNodeVolumeDetachment(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster) { + workloadClusterProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name) + + By("Scaling up the CSI controller to unblock lifecycle of PVC's") + csiController := &appsv1.Deployment{} + csiControllerKey := client.ObjectKey{ + Namespace: "vmware-system-csi", + Name: "vsphere-csi-controller", + } + Expect(workloadClusterProxy.GetClient().Get(ctx, csiControllerKey, csiController)).To(Succeed()) + patchHelper, err := patch.NewHelper(csiController, workloadClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred()) + csiController.Spec.Replicas = ptr.To[int32](1) + Expect(patchHelper.Patch(ctx, csiController)).To(Succeed()) + + framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + Getter: workloadClusterProxy.GetClient(), + Deployment: csiController, + }, e2eConfig.GetIntervals("node-drain", "wait-deployment-available")...) +} + +func waitForDeploymentScaledDown(ctx context.Context, getter framework.Getter, objectKey client.ObjectKey, intervals ...interface{}) { + Byf("Waiting for deployment %s to be scaled to 0", objectKey) + deployment := &appsv1.Deployment{} + Eventually(func() bool { + if err := getter.Get(ctx, objectKey, deployment); err != nil { + return false + } + if deployment.Status.Replicas == 0 && deployment.Status.AvailableReplicas == 0 { + return true + } + return false + }, intervals...).Should(BeTrue()) +} + +const ( + nodeRoleControlPlane = "node-role.kubernetes.io/control-plane" +) + +type deployEvictablePodInput struct { + WorkloadClusterProxy framework.ClusterProxy + ControlPlane *controlplanev1.KubeadmControlPlane + MachineDeployment *clusterv1.MachineDeployment + StatefulSetName string + Namespace string + NodeSelector map[string]string + + ModifyStatefulSet func(statefulSet *appsv1.StatefulSet) + + WaitForStatefulSetAvailableInterval []interface{} +} + +// deployEvictablePod will deploy a StatefulSet on a ControlPlane or MachineDeployment. +// It will deploy one Pod replica to each Machine. 
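+ // The replica count is copied from the ControlPlane or MachineDeployment and a required pod
+ // anti-affinity on kubernetes.io/hostname spreads the Pods, so each Node runs at most one Pod
+ // of the StatefulSet, each mounting a PVC from the VolumeClaimTemplate.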
+func deployEvictablePod(ctx context.Context, input deployEvictablePodInput) { + Expect(input.StatefulSetName).ToNot(BeNil(), "Need a statefulset name in DeployUnevictablePod") + Expect(input.Namespace).ToNot(BeNil(), "Need a namespace in DeployUnevictablePod") + Expect(input.WorkloadClusterProxy).ToNot(BeNil(), "Need a workloadClusterProxy in DeployUnevictablePod") + Expect((input.MachineDeployment == nil && input.ControlPlane != nil) || + (input.MachineDeployment != nil && input.ControlPlane == nil)).To(BeTrue(), "Either MachineDeployment or ControlPlane must be set in DeployUnevictablePod") + + framework.EnsureNamespace(ctx, input.WorkloadClusterProxy.GetClient(), input.Namespace) + + workloadStatefulSet := generateStatefulset(generateStatefulsetInput{ + ControlPlane: input.ControlPlane, + MachineDeployment: input.MachineDeployment, + Name: input.StatefulSetName, + Namespace: input.Namespace, + NodeSelector: input.NodeSelector, + }) + + if input.ModifyStatefulSet != nil { + input.ModifyStatefulSet(workloadStatefulSet) + } + + workloadClient := input.WorkloadClusterProxy.GetClientSet() + + addStatefulSetToWorkloadCluster(ctx, addStatefulSetToWorkloadClusterInput{ + Namespace: input.Namespace, + ClientSet: workloadClient, + StatefulSet: workloadStatefulSet, + }) + + waitForStatefulSetsAvailable(ctx, WaitForStatefulSetsAvailableInput{ + Getter: input.WorkloadClusterProxy.GetClient(), + StatefulSet: workloadStatefulSet, + }, input.WaitForStatefulSetAvailableInterval...) +} + +type generateStatefulsetInput struct { + ControlPlane *controlplanev1.KubeadmControlPlane + MachineDeployment *clusterv1.MachineDeployment + Name string + Namespace string + NodeSelector map[string]string +} + +func generateStatefulset(input generateStatefulsetInput) *appsv1.StatefulSet { + workloadStatefulSet := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: input.Name, + Namespace: input.Namespace, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "nonstop", + "statefulset": input.Name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "nonstop", + "statefulset": input.Name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "main", + Image: "registry.k8s.io/pause:3.10", + VolumeMounts: []corev1.VolumeMount{{ + Name: "sts-pvc", + MountPath: "/data", + }}, + }}, + Affinity: &corev1.Affinity{ + // Make sure only 1 Pod of this StatefulSet can run on the same Node. 
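+ // The required anti-affinity term below matches Pods of this StatefulSet via their
+ // "statefulset" label on the kubernetes.io/hostname topology, so draining a single Node
+ // evicts at most one Pod with an attached volume.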
+ PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "statefulset", + Operator: "In", + Values: []string{input.Name}, + }}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sts-pvc", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + StorageClassName: ptr.To("sts-pvc"), + }, + }}, + }, + } + + if input.ControlPlane != nil { + workloadStatefulSet.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleControlPlane: ""} + workloadStatefulSet.Spec.Template.Spec.Tolerations = []corev1.Toleration{ + { + Key: nodeRoleControlPlane, + Effect: "NoSchedule", + }, + } + workloadStatefulSet.Spec.Replicas = input.ControlPlane.Spec.Replicas + } + if input.MachineDeployment != nil { + workloadStatefulSet.Spec.Replicas = input.MachineDeployment.Spec.Replicas + } + + // Note: If set, the NodeSelector field overwrites the NodeSelector we set above for control plane nodes. + if input.NodeSelector != nil { + workloadStatefulSet.Spec.Template.Spec.NodeSelector = input.NodeSelector + } + + return workloadStatefulSet +} + +type addStatefulSetToWorkloadClusterInput struct { + ClientSet *kubernetes.Clientset + StatefulSet *appsv1.StatefulSet + Namespace string +} + +func addStatefulSetToWorkloadCluster(ctx context.Context, input addStatefulSetToWorkloadClusterInput) { + Eventually(func() error { + result, err := input.ClientSet.AppsV1().StatefulSets(input.Namespace).Create(ctx, input.StatefulSet, metav1.CreateOptions{}) + if result != nil && err == nil { + return nil + } + return fmt.Errorf("statefulset %s not successfully created in workload cluster: %v", klog.KObj(input.StatefulSet), err) + }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to create statefulset %s in workload cluster", klog.KObj(input.StatefulSet)) +} + +const ( + retryableOperationInterval = 3 * time.Second + // retryableOperationTimeout requires a higher value especially for self-hosted upgrades. + // Short unavailability of the Kube APIServer due to joining etcd members paired with unreachable conversion webhooks due to + // failed leader election and thus controller restarts lead to longer taking retries. + // The timeout occurs when listing machines in `GetControlPlaneMachinesByCluster`. + retryableOperationTimeout = 3 * time.Minute +) + +// WaitForStatefulSetsAvailableInput is the input for WaitForStatefulSetsAvailable. +type WaitForStatefulSetsAvailableInput struct { + Getter framework.Getter + StatefulSet *appsv1.StatefulSet +} + +// waitForStatefulSetsAvailable waits until the StatefulSet has status.Available = True, that signals that +// all the desired replicas are in place. +// This can be used to check if Cluster API controllers installed in the management cluster are working. 
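+ // Note: unlike Deployments, StatefulSets do not report an Available condition, so availability
+ // is determined by comparing status.AvailableReplicas against spec.Replicas.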
+func waitForStatefulSetsAvailable(ctx context.Context, input WaitForStatefulSetsAvailableInput, intervals ...interface{}) { + Byf("Waiting for statefulset %s to be available", klog.KObj(input.StatefulSet)) + statefulSet := &appsv1.StatefulSet{} + Eventually(func() bool { + key := client.ObjectKey{ + Namespace: input.StatefulSet.GetNamespace(), + Name: input.StatefulSet.GetName(), + } + if err := input.Getter.Get(ctx, key, statefulSet); err != nil { + return false + } + if *statefulSet.Spec.Replicas == statefulSet.Status.AvailableReplicas { + return true + } + return false + }, intervals...).Should(BeTrue(), func() string { return DescribeFailedStatefulSet(input, statefulSet) }) +} + +// DescribeFailedStatefulSet returns detailed output to help debug a statefulSet failure in e2e. +func DescribeFailedStatefulSet(input WaitForStatefulSetsAvailableInput, statefulSet *appsv1.StatefulSet) string { + b := strings.Builder{} + b.WriteString(fmt.Sprintf("StatefulSet %s failed to get status.Available = True condition", + klog.KObj(input.StatefulSet))) + if statefulSet == nil { + b.WriteString("\nStatefulSet: nil\n") + } else { + b.WriteString(fmt.Sprintf("\nStatefulSet:\n%s\n", framework.PrettyPrint(statefulSet))) + } + return b.String() +} From f6e21ef132adf1e1c285ce9a2914c177fd900450 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Tue, 3 Dec 2024 16:25:50 +0100 Subject: [PATCH 2/3] fix timeout --- test/e2e/config/vsphere.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 66fa38552b..0a14c8914e 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -347,6 +347,6 @@ intervals: mhc-remediation/mhc-remediation: ["30m", "10s"] node-drain/wait-control-plane: ["15m", "10s"] node-drain/wait-deployment-available: ["3m", "10s"] - node-drain/wait-machine-deleted: ["2m", "10s"] + node-drain/wait-machine-deleted: ["10m", "10s"] node-drain/wait-statefulset-available: ["3m", "10s"] anti-affinity/wait-vm-redistribution: ["5m", "10s"] From d7e36a253a7c25dd0b062f8a60014c5a16b4e6ba Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Wed, 4 Dec 2024 10:18:53 +0100 Subject: [PATCH 3/3] Bump CAPI --- Makefile | 2 +- go.mod | 4 ++-- go.sum | 8 ++++---- packaging/go.mod | 5 ++--- packaging/go.sum | 10 ++++------ test/e2e/config/vsphere.yaml | 6 +++--- test/go.mod | 6 +++--- test/go.sum | 12 ++++++------ 8 files changed, 25 insertions(+), 28 deletions(-) diff --git a/Makefile b/Makefile index 055a07312d..68cf7d16a7 100644 --- a/Makefile +++ b/Makefile @@ -182,7 +182,7 @@ IMPORT_BOSS_VER := v0.28.1 IMPORT_BOSS := $(abspath $(TOOLS_BIN_DIR)/$(IMPORT_BOSS_BIN)) IMPORT_BOSS_PKG := k8s.io/code-generator/cmd/import-boss -CAPI_HACK_TOOLS_VER := 721b6cf772821d789e9165511ffcb8945f808a79 # Note: this is the commit ID of CAPI v1.9.0-rc.0 +CAPI_HACK_TOOLS_VER := 5cb86c2c82439a299b9cad2f6e97971e4879115f # Note: this is the commit ID of CAPI v1.9.0-rc.1 BOSKOSCTL_BIN := boskosctl BOSKOSCTL := $(abspath $(TOOLS_BIN_DIR)/$(BOSKOSCTL_BIN)) diff --git a/go.mod b/go.mod index 2b2821eae0..4153b73e65 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module sigs.k8s.io/cluster-api-provider-vsphere go 1.22.0 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.9.0-rc.0 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.9.0-rc.1 replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v0.0.0-20240404200847-de75746a9505 @@ -40,7 
+40,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.19.2 + sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index fedcd2d12c..40165bec4a 100644 --- a/go.sum +++ b/go.sum @@ -417,10 +417,10 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.9.0-rc.0 h1:R7uBlTtCFkz2c2Oy+PGDod0yRtNRXz7dhyOkLtQBFaI= -sigs.k8s.io/cluster-api v1.9.0-rc.0/go.mod h1:Wvjujq1g8+hBQYWYra7vZLoG1VlCfSisrAtOgUUgwvI= -sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= -sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/cluster-api v1.9.0-rc.1 h1:fWrcw3p3nvzUjbQpB/GsTP+LeAVN4wjXV8LFkIZmN7A= +sigs.k8s.io/cluster-api v1.9.0-rc.1/go.mod h1:8rjpkMxLFcA87Y3P6NOi6E9RMZv2uRnN9ppOPAxrTAY= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= diff --git a/packaging/go.mod b/packaging/go.mod index 07c19fc8b6..275215e5d4 100644 --- a/packaging/go.mod +++ b/packaging/go.mod @@ -2,7 +2,7 @@ module sigs.k8s.io/cluster-api-provider-vsphere/packaging go 1.22.7 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.9.0-rc.0 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.9.0-rc.1 replace sigs.k8s.io/cluster-api-provider-vsphere => ../ @@ -16,7 +16,7 @@ require ( k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api-provider-vsphere v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.19.2 + sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/kustomize/api v0.18.0 sigs.k8s.io/kustomize/kyaml v0.18.1 sigs.k8s.io/yaml v1.4.0 @@ -30,7 +30,6 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect diff --git a/packaging/go.sum b/packaging/go.sum index 2e628ac223..0441f1661c 100644 --- a/packaging/go.sum +++ b/packaging/go.sum @@ -20,8 +20,6 @@ github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= 
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -232,10 +230,10 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/cluster-api v1.9.0-rc.0 h1:R7uBlTtCFkz2c2Oy+PGDod0yRtNRXz7dhyOkLtQBFaI= -sigs.k8s.io/cluster-api v1.9.0-rc.0/go.mod h1:Wvjujq1g8+hBQYWYra7vZLoG1VlCfSisrAtOgUUgwvI= -sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= -sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/cluster-api v1.9.0-rc.1 h1:fWrcw3p3nvzUjbQpB/GsTP+LeAVN4wjXV8LFkIZmN7A= +sigs.k8s.io/cluster-api v1.9.0-rc.1/go.mod h1:8rjpkMxLFcA87Y3P6NOi6E9RMZv2uRnN9ppOPAxrTAY= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 0a14c8914e..496f2e04f6 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -27,7 +27,7 @@ providers: type: CoreProvider versions: - name: "v1.9.0" - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.0-rc.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.0-rc.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -67,7 +67,7 @@ providers: type: BootstrapProvider versions: - name: "v1.9.0" - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.0-rc.0/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.0-rc.1/bootstrap-components.yaml" type: "url" contract: v1beta1 files: @@ -107,7 +107,7 @@ providers: type: ControlPlaneProvider versions: - name: "v1.9.0" - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.0-rc.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.9.0-rc.1/control-plane-components.yaml" type: "url" contract: v1beta1 files: diff --git a/test/go.mod b/test/go.mod index 1e58621afb..fdebe3e946 100644 --- a/test/go.mod +++ b/test/go.mod @@ -2,9 +2,9 @@ module sigs.k8s.io/cluster-api-provider-vsphere/test go 1.22.0 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.9.0-rc.0 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.9.0-rc.1 -replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.9.0-rc.0 +replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test 
v1.9.0-rc.1.0.20241204073105-63592b4d3c1c replace sigs.k8s.io/cluster-api-provider-vsphere => ../ @@ -36,7 +36,7 @@ require ( sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api-provider-vsphere v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.19.2 + sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/yaml v1.4.0 ) diff --git a/test/go.sum b/test/go.sum index caf5e6f5e4..a13485a111 100644 --- a/test/go.sum +++ b/test/go.sum @@ -508,12 +508,12 @@ k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.9.0-rc.0 h1:R7uBlTtCFkz2c2Oy+PGDod0yRtNRXz7dhyOkLtQBFaI= -sigs.k8s.io/cluster-api v1.9.0-rc.0/go.mod h1:Wvjujq1g8+hBQYWYra7vZLoG1VlCfSisrAtOgUUgwvI= -sigs.k8s.io/cluster-api/test v1.9.0-rc.0 h1:lYDvc9cmCKon2zjwE5qetRO7j2YfJ9MMZeb4Hv89Tlc= -sigs.k8s.io/cluster-api/test v1.9.0-rc.0/go.mod h1:SQKcvXlJ1fxk+gWRYp9J/sq6jKmd7onoDLqDFQbxt9M= -sigs.k8s.io/controller-runtime v0.19.2 h1:3sPrF58XQEPzbE8T81TN6selQIMGbtYwuaJ6eDssDF8= -sigs.k8s.io/controller-runtime v0.19.2/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/cluster-api v1.9.0-rc.1 h1:fWrcw3p3nvzUjbQpB/GsTP+LeAVN4wjXV8LFkIZmN7A= +sigs.k8s.io/cluster-api v1.9.0-rc.1/go.mod h1:8rjpkMxLFcA87Y3P6NOi6E9RMZv2uRnN9ppOPAxrTAY= +sigs.k8s.io/cluster-api/test v1.9.0-rc.1.0.20241204073105-63592b4d3c1c h1:VzHdruK9R/5/Juw2ncG+d6F2QV9GiZN2IW2VBLIQuj8= +sigs.k8s.io/cluster-api/test v1.9.0-rc.1.0.20241204073105-63592b4d3c1c/go.mod h1:w09AhrOiWTjN9ToL4xaMZLnutMFe78x03POGZ3Fsbeg= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.25.0 h1:ugUvgesHKKA0yKmD6QtYTiEev+kPUpGxdTPbMGf8VTU=