From e8d11fe615be4d12a9cca8b798b1c8d27da7a161 Mon Sep 17 00:00:00 2001 From: killianmuldoon Date: Wed, 16 Nov 2022 18:09:23 +0000 Subject: [PATCH 01/87] Update e2e components to v1.2.5 Signed-off-by: killianmuldoon --- test/e2e/clusterctl_upgrade_test.go | 4 ++-- test/e2e/config/docker.yaml | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 39e90acfd514..33996b2c5ed2 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -66,7 +66,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.2=>current)", func() { BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}", + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.5/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1beta1", InitWithKubernetesVersion: "v1.25.0", } @@ -81,7 +81,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.2=>cur BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, - InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/clusterctl-{OS}-{ARCH}", + InitWithBinary: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.5/clusterctl-{OS}-{ARCH}", InitWithProvidersContract: "v1beta1", InitWithKubernetesVersion: "v1.25.0", WorkloadFlavor: "topology", diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml index 8ee83c2d5c21..f8aebbb1e018 100644 --- a/test/e2e/config/docker.yaml +++ b/test/e2e/config/docker.yaml @@ -49,8 +49,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/core-components.yaml" + - name: v1.2.5 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.5/core-components.yaml" type: "url" contract: v1beta1 replacements: @@ -87,8 +87,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/bootstrap-components.yaml" + - name: v1.2.5 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.5/bootstrap-components.yaml" type: "url" contract: v1beta1 replacements: @@ -125,8 +125,8 @@ providers: new: --metrics-addr=:8080 files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. 
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/control-plane-components.yaml" + - name: v1.2.5 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.5/control-plane-components.yaml" type: "url" contract: v1beta1 replacements: @@ -165,8 +165,8 @@ providers: files: - sourcePath: "../data/shared/v1alpha4/metadata.yaml" - sourcePath: "../data/infrastructure-docker/v1alpha4/cluster-template.yaml" - - name: v1.2.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.2/infrastructure-components-development.yaml" + - name: v1.2.5 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.5/infrastructure-components-development.yaml" type: "url" contract: v1beta1 replacements: From a61b8b1d8426c2a537746b025084bc7873ce3145 Mon Sep 17 00:00:00 2001 From: k8s-infra-cherrypick-robot <90416843+k8s-infra-cherrypick-robot@users.noreply.github.com> Date: Thu, 17 Nov 2022 00:04:40 -0800 Subject: [PATCH 02/87] [release-1.3] :sparkles:add kubekey k3s boostrap and control plane provider (#7554) * add kubekey k3s boostrap and control plane provider Signed-off-by: 24sama * remove useless tab Signed-off-by: 24sama Signed-off-by: 24sama Co-authored-by: 24sama --- .../client/config/providers_client.go | 26 +++++--- cmd/clusterctl/client/config_test.go | 4 ++ .../cmd/config_repositories_test.go | 10 ++++ docs/book/src/clusterctl/provider-contract.md | 60 ++++++++++--------- docs/book/src/user/quick-start.md | 5 +- 5 files changed, 67 insertions(+), 38 deletions(-) diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go index 1eb6fbf9b27a..c77c740039be 100644 --- a/cmd/clusterctl/client/config/providers_client.go +++ b/cmd/clusterctl/client/config/providers_client.go @@ -65,17 +65,19 @@ const ( // Bootstrap providers. const ( - KubeadmBootstrapProviderName = "kubeadm" - TalosBootstrapProviderName = "talos" - MicroK8sBootstrapProviderName = "microk8s" + KubeadmBootstrapProviderName = "kubeadm" + TalosBootstrapProviderName = "talos" + MicroK8sBootstrapProviderName = "microk8s" + KubeKeyK3sBootstrapProviderName = "kubekey-k3s" ) // ControlPlane providers. const ( - KubeadmControlPlaneProviderName = "kubeadm" - TalosControlPlaneProviderName = "talos" - MicroK8sControlPlaneProviderName = "microk8s" - NestedControlPlaneProviderName = "nested" + KubeadmControlPlaneProviderName = "kubeadm" + TalosControlPlaneProviderName = "talos" + MicroK8sControlPlaneProviderName = "microk8s" + NestedControlPlaneProviderName = "nested" + KubeKeyK3sControlPlaneProviderName = "kubekey-k3s" ) // Other. 
@@ -253,6 +255,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/kubernetes-sigs/cluster-api/releases/latest/bootstrap-components.yaml", providerType: clusterctlv1.BootstrapProviderType, }, + &provider{ + name: KubeKeyK3sBootstrapProviderName, + url: "https://github.com/kubesphere/kubekey/releases/latest/bootstrap-components.yaml", + providerType: clusterctlv1.BootstrapProviderType, + }, &provider{ name: TalosBootstrapProviderName, url: "https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/bootstrap-components.yaml", @@ -269,6 +276,11 @@ func (p *providersClient) defaults() []Provider { url: "https://github.com/kubernetes-sigs/cluster-api/releases/latest/control-plane-components.yaml", providerType: clusterctlv1.ControlPlaneProviderType, }, + &provider{ + name: KubeKeyK3sControlPlaneProviderName, + url: "https://github.com/kubesphere/kubekey/releases/latest/control-plane-components.yaml", + providerType: clusterctlv1.ControlPlaneProviderType, + }, &provider{ name: TalosControlPlaneProviderName, url: "https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/control-plane-components.yaml", diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go index fe8ebbd5e591..8743d74a4056 100644 --- a/cmd/clusterctl/client/config_test.go +++ b/cmd/clusterctl/client/config_test.go @@ -57,9 +57,11 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { wantProviders: []string{ config.ClusterAPIProviderName, config.KubeadmBootstrapProviderName, + config.KubeKeyK3sBootstrapProviderName, config.MicroK8sBootstrapProviderName, config.TalosBootstrapProviderName, config.KubeadmControlPlaneProviderName, + config.KubeKeyK3sControlPlaneProviderName, config.MicroK8sControlPlaneProviderName, config.NestedControlPlaneProviderName, config.TalosControlPlaneProviderName, @@ -100,9 +102,11 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) { config.ClusterAPIProviderName, customProviderConfig.Name(), config.KubeadmBootstrapProviderName, + config.KubeKeyK3sBootstrapProviderName, config.MicroK8sBootstrapProviderName, config.TalosBootstrapProviderName, config.KubeadmControlPlaneProviderName, + config.KubeKeyK3sControlPlaneProviderName, config.MicroK8sControlPlaneProviderName, config.NestedControlPlaneProviderName, config.TalosControlPlaneProviderName, diff --git a/cmd/clusterctl/cmd/config_repositories_test.go b/cmd/clusterctl/cmd/config_repositories_test.go index 13fd40ec075c..d91806ef6cf2 100644 --- a/cmd/clusterctl/cmd/config_repositories_test.go +++ b/cmd/clusterctl/cmd/config_repositories_test.go @@ -103,9 +103,11 @@ var expectedOutputText = `NAME TYPE URL cluster-api CoreProvider https://github.com/myorg/myforkofclusterapi/releases/latest/ core_components.yaml another-provider BootstrapProvider ./ bootstrap-components.yaml kubeadm BootstrapProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ bootstrap-components.yaml +kubekey-k3s BootstrapProvider https://github.com/kubesphere/kubekey/releases/latest/ bootstrap-components.yaml microk8s BootstrapProvider https://github.com/canonical/cluster-api-bootstrap-provider-microk8s/releases/latest/ bootstrap-components.yaml talos BootstrapProvider https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/ bootstrap-components.yaml kubeadm ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ control-plane-components.yaml +kubekey-k3s ControlPlaneProvider 
https://github.com/kubesphere/kubekey/releases/latest/ control-plane-components.yaml microk8s ControlPlaneProvider https://github.com/canonical/cluster-api-control-plane-provider-microk8s/releases/latest/ control-plane-components.yaml nested ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ control-plane-components.yaml talos ControlPlaneProvider https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/ control-plane-components.yaml @@ -148,6 +150,10 @@ var expectedOutputYaml = `- File: core_components.yaml Name: kubeadm ProviderType: BootstrapProvider URL: https://github.com/kubernetes-sigs/cluster-api/releases/latest/ +- File: bootstrap-components.yaml + Name: kubekey-k3s + ProviderType: BootstrapProvider + URL: https://github.com/kubesphere/kubekey/releases/latest/ - File: bootstrap-components.yaml Name: microk8s ProviderType: BootstrapProvider @@ -160,6 +166,10 @@ var expectedOutputYaml = `- File: core_components.yaml Name: kubeadm ProviderType: ControlPlaneProvider URL: https://github.com/kubernetes-sigs/cluster-api/releases/latest/ +- File: control-plane-components.yaml + Name: kubekey-k3s + ProviderType: ControlPlaneProvider + URL: https://github.com/kubesphere/kubekey/releases/latest/ - File: control-plane-components.yaml Name: microk8s ProviderType: ControlPlaneProvider diff --git a/docs/book/src/clusterctl/provider-contract.md b/docs/book/src/clusterctl/provider-contract.md index 8de944fd3b25..ba132d0ceeb8 100644 --- a/docs/book/src/clusterctl/provider-contract.md +++ b/docs/book/src/clusterctl/provider-contract.md @@ -242,35 +242,37 @@ easier transition from `kubectl apply` to `clusterctl`. As a reference you can consider the labels applied to the following providers. 
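+For example, assuming the core provider is installed, the workload objects it deployed can be
+listed by selecting on its label:
+
+```bash
+kubectl get all --all-namespaces -l cluster.x-k8s.io/provider=cluster-api
+```
+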
-| Provider Name| Label | -|--------------|-------------------------------------------------------| -|CAPI | cluster.x-k8s.io/provider=cluster-api | -|CABPK | cluster.x-k8s.io/provider=bootstrap-kubeadm | -|CABPM | cluster.x-k8s.io/provider=bootstrap-microk8s | -|CACPK | cluster.x-k8s.io/provider=control-plane-kubeadm | -|CACPM | cluster.x-k8s.io/provider=control-plane-microk8s | -|CACPN | cluster.x-k8s.io/provider=control-plane-nested | -|CAPA | cluster.x-k8s.io/provider=infrastructure-aws | -|CAPB | cluster.x-k8s.io/provider=infrastructure-byoh | -|CAPC | cluster.x-k8s.io/provider=infrastructure-cloudstack | -|CAPD | cluster.x-k8s.io/provider=infrastructure-docker | -|CAPDO | cluster.x-k8s.io/provider=infrastructure-digitalocean | -|CAPG | cluster.x-k8s.io/provider=infrastructure-gcp | -|CAPH | cluster.x-k8s.io/provider=infrastructure-hetzner | -|CAPIBM | cluster.x-k8s.io/provider=infrastructure-ibmcloud | -|CAPKK | cluster.x-k8s.io/provider=infrastructure-kubekey | -|CAPK | cluster.x-k8s.io/provider=infrastructure-kubevirt | -|CAPM3 | cluster.x-k8s.io/provider=infrastructure-metal3 | -|CAPN | cluster.x-k8s.io/provider=infrastructure-nested | -|CAPO | cluster.x-k8s.io/provider=infrastructure-openstack | -|CAPOCI | cluster.x-k8s.io/provider=infrastructure-oci | -|CAPP | cluster.x-k8s.io/provider=infrastructure-packet | -|CAPV | cluster.x-k8s.io/provider=infrastructure-vsphere | -|CAPVC | cluster.x-k8s.io/provider=infrastructure-vcluster | -|CAPVCD | cluster.x-k8s.io/provider=infrastructure-vcd | -|CAPX | cluster.x-k8s.io/provider=infrastructure-nutanix | -|CAPZ | cluster.x-k8s.io/provider=infrastructure-azure | -|CAPOSC | cluster.x-k8s.io/provider=infrastructure-outscale | +| Provider Name | Label | +|---------------|-------------------------------------------------------| +| CAPI | cluster.x-k8s.io/provider=cluster-api | +| CABPK | cluster.x-k8s.io/provider=bootstrap-kubeadm | +| CABPM | cluster.x-k8s.io/provider=bootstrap-microk8s | +| CABPKK3S | cluster.x-k8s.io/provider=bootstrap-kubekey-k3s | +| CACPK | cluster.x-k8s.io/provider=control-plane-kubeadm | +| CACPM | cluster.x-k8s.io/provider=control-plane-microk8s | +| CACPN | cluster.x-k8s.io/provider=control-plane-nested | +| CACPKK3S | cluster.x-k8s.io/provider=control-plane-kubekey-k3s | +| CAPA | cluster.x-k8s.io/provider=infrastructure-aws | +| CAPB | cluster.x-k8s.io/provider=infrastructure-byoh | +| CAPC | cluster.x-k8s.io/provider=infrastructure-cloudstack | +| CAPD | cluster.x-k8s.io/provider=infrastructure-docker | +| CAPDO | cluster.x-k8s.io/provider=infrastructure-digitalocean | +| CAPG | cluster.x-k8s.io/provider=infrastructure-gcp | +| CAPH | cluster.x-k8s.io/provider=infrastructure-hetzner | +| CAPIBM | cluster.x-k8s.io/provider=infrastructure-ibmcloud | +| CAPKK | cluster.x-k8s.io/provider=infrastructure-kubekey | +| CAPK | cluster.x-k8s.io/provider=infrastructure-kubevirt | +| CAPM3 | cluster.x-k8s.io/provider=infrastructure-metal3 | +| CAPN | cluster.x-k8s.io/provider=infrastructure-nested | +| CAPO | cluster.x-k8s.io/provider=infrastructure-openstack | +| CAPOCI | cluster.x-k8s.io/provider=infrastructure-oci | +| CAPP | cluster.x-k8s.io/provider=infrastructure-packet | +| CAPV | cluster.x-k8s.io/provider=infrastructure-vsphere | +| CAPVC | cluster.x-k8s.io/provider=infrastructure-vcluster | +| CAPVCD | cluster.x-k8s.io/provider=infrastructure-vcd | +| CAPX | cluster.x-k8s.io/provider=infrastructure-nutanix | +| CAPZ | cluster.x-k8s.io/provider=infrastructure-azure | +| CAPOSC | 
cluster.x-k8s.io/provider=infrastructure-outscale | ### Workload cluster templates An infrastructure provider could publish a **cluster templates** file to be used by `clusterctl generate cluster`. diff --git a/docs/book/src/user/quick-start.md b/docs/book/src/user/quick-start.md index c067d6619560..603ec4e31402 100644 --- a/docs/book/src/user/quick-start.md +++ b/docs/book/src/user/quick-start.md @@ -441,8 +441,6 @@ clusterctl init --infrastructure ibmcloud clusterctl init --infrastructure kubekey ``` -{{#/tab }} - {{#/tab }} {{#tab Kubevirt}} @@ -819,6 +817,8 @@ export INSTANCES= export CONTROL_PLANE_ENDPOINT_IP= ``` +Please visit the [KubeKey provider] for more information. + {{#/tab }} {{#tab Kubevirt}} @@ -1243,6 +1243,7 @@ See the [clusterctl] documentation for more detail about clusterctl supported ac [management cluster]: ../reference/glossary.md#management-cluster [Metal3 getting started guide]: https://github.com/metal3-io/cluster-api-provider-metal3/blob/master/docs/getting-started.md [Metal3 provider]: https://github.com/metal3-io/cluster-api-provider-metal3/ +[KubeKey provider]: https://github.com/kubesphere/kubekey [Kubevirt provider]: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/ [oci-provider]: https://oracle.github.io/cluster-api-provider-oci/#getting-started [Equinix Metal getting started guide]: https://github.com/kubernetes-sigs/cluster-api-provider-packet#using From 6f777bb87c5ceb5b1340ff7588aeb3b74a4f3846 Mon Sep 17 00:00:00 2001 From: killianmuldoon Date: Fri, 11 Nov 2022 15:23:14 +0000 Subject: [PATCH 03/87] Add finalizer reconcile for Topology MachineDeployments and MachineSets Signed-off-by: killianmuldoon --- .../machinedeployment_sync.go | 13 --- .../cluster/cluster_controller_test.go | 15 +--- .../topology/cluster/desired_state.go | 8 -- .../topology/cluster/desired_state_test.go | 3 - .../machinedeployment_controller.go | 28 +++--- .../machinedeployment_controller_test.go | 76 +++++++++++++++- .../machineset/machineset_controller.go | 25 ++++-- .../machineset/machineset_controller_test.go | 88 +++++++++++++++++++ internal/test/builder/builders.go | 28 ++++-- .../test/builder/zz_generated.deepcopy.go | 7 ++ 10 files changed, 227 insertions(+), 64 deletions(-) diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 241f27c7fd08..526629a1a2e8 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -35,11 +35,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/labels" "sigs.k8s.io/cluster-api/util/patch" ) @@ -178,17 +176,6 @@ func (r *Reconciler) getNewMachineSet(ctx context.Context, d *clusterv1.MachineD }, } - if feature.Gates.Enabled(feature.ClusterTopology) { - // If the MachineDeployment is owned by a Cluster Topology, - // add the finalizer to allow the topology controller to - // clean up resources when the MachineSet is deleted. - // MachineSets are deleted during rollout (e.g. template rotation) and - // after MachineDeployment deletion. 
-		if labels.IsTopologyOwned(d) {
-			controllerutil.AddFinalizer(&newMS, clusterv1.MachineSetTopologyFinalizer)
-		}
-	}
-
 	if d.Spec.Strategy.RollingUpdate.DeletePolicy != nil {
 		newMS.Spec.DeletePolicy = *d.Spec.Strategy.RollingUpdate.DeletePolicy
 	}
diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go
index 84efee37e4ba..6299c578c68d 100644
--- a/internal/controllers/topology/cluster/cluster_controller_test.go
+++ b/internal/controllers/topology/cluster/cluster_controller_test.go
@@ -911,9 +911,8 @@ func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error {
 // assertMachineDeploymentsReconcile checks if the MachineDeployments:
 // 1) Are created in the correct number.
 // 2) Have the correct labels (TopologyOwned, ClusterName, MachineDeploymentName).
-// 3) Have the correct finalizer applied.
-// 4) Have the correct replicas and version.
-// 6) Have the correct Kind/APIVersion and Labels/Annotations for BoostrapRef and InfrastructureRef templates.
+// 3) Have the correct replicas and version.
+// 4) Have the correct Kind/APIVersion and Labels/Annotations for BootstrapRef and InfrastructureRef templates.
 func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error {
 	// List all created machine deployments to assert the expected numbers are created.
 	machineDeployments := &clusterv1.MachineDeploymentList{}
@@ -950,16 +949,6 @@ func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error {
 			continue
 		}

-		// Assert that the correct Finalizer has been added to the MachineDeployment.
-		for _, f := range md.Finalizers {
-			// Break as soon as we find a matching finalizer.
-			if f == clusterv1.MachineDeploymentTopologyFinalizer {
-				break
-			}
-			// False if the finalizer is not present on the MachineDeployment.
-			return fmt.Errorf("finalizer %v not found on MachineDeployment", clusterv1.MachineDeploymentTopologyFinalizer)
-		}
-
 		// Check if the ClusterTopologyLabelName and ClusterTopologyOwnedLabel are set correctly.
 		if err := assertClusterTopologyOwnedLabel(&md); err != nil {
 			return err
diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go
index 08f670bd5a22..b4d8828c115a 100644
--- a/internal/controllers/topology/cluster/desired_state.go
+++ b/internal/controllers/topology/cluster/desired_state.go
@@ -28,7 +28,6 @@ import (
 	"k8s.io/apiserver/pkg/storage/names"
 	"k8s.io/utils/pointer"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
@@ -678,13 +677,6 @@ func computeMachineDeployment(_ context.Context, s *scope.Scope, desiredControlP
 		},
 	}

-	// If it's a new MachineDeployment, set the finalizer.
-	// Note: we only add it on creation to avoid race conditions later on when
-	// the MachineDeployment topology controller removes the finalizer.
-	if currentMachineDeployment == nil {
-		controllerutil.AddFinalizer(desiredMachineDeploymentObj, clusterv1.MachineDeploymentTopologyFinalizer)
-	}
-
 	// If an existing MachineDeployment is present, override the MachineDeployment generate name
 	// re-using the existing name (this will help in reconcile).
 	if currentMachineDeployment != nil && currentMachineDeployment.Object != nil {
diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go
index 7ae9b3a0414a..32414d7522d6 100644
--- a/internal/controllers/topology/cluster/desired_state_test.go
+++ b/internal/controllers/topology/cluster/desired_state_test.go
@@ -30,7 +30,6 @@ import (
 	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/pointer"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
@@ -1443,7 +1442,6 @@ func TestComputeMachineDeployment(t *testing.T) {
 		g.Expect(actualMd.Labels).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentLabelName, "big-pool-of-machines"))
 		g.Expect(actualMd.Labels).To(HaveKey(clusterv1.ClusterTopologyOwnedLabel))
-		g.Expect(controllerutil.ContainsFinalizer(actualMd, clusterv1.MachineDeploymentTopologyFinalizer)).To(BeTrue())

 		g.Expect(actualMd.Spec.Selector.MatchLabels).To(HaveKey(clusterv1.ClusterTopologyOwnedLabel))
 		g.Expect(actualMd.Spec.Selector.MatchLabels).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentLabelName, "big-pool-of-machines"))
@@ -1524,7 +1522,6 @@ func TestComputeMachineDeployment(t *testing.T) {
 		g.Expect(actualMd.Labels).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentLabelName, "big-pool-of-machines"))
 		g.Expect(actualMd.Labels).To(HaveKey(clusterv1.ClusterTopologyOwnedLabel))
-		g.Expect(controllerutil.ContainsFinalizer(actualMd, clusterv1.MachineDeploymentTopologyFinalizer)).To(BeFalse())

 		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(HaveKeyWithValue("foo", "baz"))
 		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(HaveKeyWithValue("fizz", "buzz"))
diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go
index 7de0f63991d7..4d5bbf2d174a 100644
--- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go
+++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go
@@ -21,6 +21,7 @@ import (

 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -84,7 +85,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 //
-// We don't have to set the finalizer, as it's already set during MachineDeployment creation
-// in the cluster topology controller.
-func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+// The finalizer is managed here: it is added as soon as a MachineDeployment owned by a
+// Cluster topology is reconciled, and removed again once deletion has been handled.
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
 	log := ctrl.LoggerFrom(ctx)

 	// Fetch the MachineDeployment instance.
@@ -112,12 +113,25 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		return ctrl.Result{}, nil
 	}

+	// Create a patch helper to add or remove the finalizer from the MachineDeployment.
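+	// Note: the deferred Patch below writes the change back, so a finalizer added or
+	// removed during this reconcile is persisted even when an error is returned.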
+ patchHelper, err := patch.NewHelper(md, r.Client) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: md}) + } + defer func() { + if err := patchHelper.Patch(ctx, md); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: md})}) + } + }() + // Handle deletion reconciliation loop. if !md.ObjectMeta.DeletionTimestamp.IsZero() { return r.reconcileDelete(ctx, md) } - // Nothing to do. + // If the MachineDeployment is not being deleted ensure the finalizer is set. + controllerutil.AddFinalizer(md, clusterv1.MachineDeploymentTopologyFinalizer) + return ctrl.Result{}, nil } @@ -151,16 +165,8 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, md *clusterv1.MachineD return ctrl.Result{}, err } - // Remove the finalizer so the MachineDeployment can be garbage collected by Kubernetes. - patchHelper, err := patch.NewHelper(md, r.Client) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: md}) - } - controllerutil.RemoveFinalizer(md, clusterv1.MachineDeploymentTopologyFinalizer) - if err := patchHelper.Patch(ctx, md); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: md}) - } + return ctrl.Result{}, nil } diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller_test.go index 6298693b70d7..fbe00497a3ee 100644 --- a/internal/controllers/topology/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller_test.go @@ -27,11 +27,85 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/util" ) +func TestMachineDeploymentTopologyFinalizer(t *testing.T) { + cluster := builder.Cluster(metav1.NamespaceDefault, "fake-cluster").Build() + mdBT := builder.BootstrapTemplate(metav1.NamespaceDefault, "mdBT").Build() + mdIMT := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "mdIMT").Build() + mdBuilder := builder.MachineDeployment(metav1.NamespaceDefault, "md"). + WithClusterName("fake-cluster"). + WithBootstrapTemplate(mdBT). 
+		WithInfrastructureTemplate(mdIMT)
+
+	md := mdBuilder.Build()
+	mdWithFinalizer := mdBuilder.Build()
+	mdWithFinalizer.Finalizers = []string{clusterv1.MachineDeploymentTopologyFinalizer}
+	mdWithDeletionTimestamp := mdBuilder.Build()
+	deletionTimestamp := metav1.Now()
+	mdWithDeletionTimestamp.DeletionTimestamp = &deletionTimestamp
+
+	mdWithDeletionTimestampAndFinalizer := mdWithDeletionTimestamp.DeepCopy()
+	mdWithDeletionTimestampAndFinalizer.Finalizers = []string{clusterv1.MachineDeploymentTopologyFinalizer}
+
+	testCases := []struct {
+		name            string
+		md              *clusterv1.MachineDeployment
+		expectFinalizer bool
+	}{
+		{
+			name:            "should add ClusterTopology finalizer to a MachineDeployment with no finalizer",
+			md:              md,
+			expectFinalizer: true,
+		},
+		{
+			name:            "should retain ClusterTopology finalizer on MachineDeployment with finalizer",
+			md:              mdWithFinalizer,
+			expectFinalizer: true,
+		},
+		{
+			name:            "should not add ClusterTopology finalizer on MachineDeployment with Deletion Timestamp and no finalizer",
+			md:              mdWithDeletionTimestamp,
+			expectFinalizer: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			fakeClient := fake.NewClientBuilder().
+				WithScheme(fakeScheme).
+				WithObjects(tc.md, mdBT, mdIMT, cluster).
+				Build()
+
+			mdr := &Reconciler{
+				Client:    fakeClient,
+				APIReader: fakeClient,
+			}
+
+			_, err := mdr.Reconcile(ctx, reconcile.Request{
+				NamespacedName: util.ObjectKey(tc.md),
+			})
+			g.Expect(err).NotTo(HaveOccurred())
+
+			key := client.ObjectKey{Namespace: tc.md.Namespace, Name: tc.md.Name}
+			var actual clusterv1.MachineDeployment
+			g.Expect(mdr.Client.Get(ctx, key, &actual)).To(Succeed())
+			if tc.expectFinalizer {
+				g.Expect(actual.Finalizers).To(ConsistOf(clusterv1.MachineDeploymentTopologyFinalizer))
+			} else {
+				g.Expect(actual.Finalizers).To(BeEmpty())
+			}
+		})
+	}
+}
+
 func TestMachineDeploymentReconciler_ReconcileDelete(t *testing.T) {
 	deletionTimeStamp := metav1.Now()
@@ -98,7 +172,7 @@ func TestMachineDeploymentReconciler_ReconcileDelete(t *testing.T) {
 	t.Run("Should not delete templates of a MachineDeployment when they are still in use in a MachineSet", func(t *testing.T) {
 		g := NewWithT(t)

-		ms := builder.MachineSet(md.Namespace, "ms").
+		ms := builder.MachineSet(md.Namespace, "md").
 			WithBootstrapTemplate(mdBT).
 			WithInfrastructureTemplate(mdIMT).
 			WithLabels(map[string]string{
diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go
index 7ecc75af59e6..58c526680107 100644
--- a/internal/controllers/topology/machineset/machineset_controller.go
+++ b/internal/controllers/topology/machineset/machineset_controller.go
@@ -23,6 +23,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/klog/v2"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -86,7 +87,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 //
-// We don't have to set the finalizer, as it's already set during MachineSet creation
-// in the MachineSet controller.
-func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+// The finalizer is managed here: it is added as soon as a MachineSet owned by a
+// Cluster topology is reconciled, and removed again once deletion has been handled.
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
 	// Fetch the MachineSet instance.
ms := &clusterv1.MachineSet{} if err := r.Client.Get(ctx, req.NamespacedName, ms); err != nil { @@ -119,12 +120,25 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, nil } + // Create a patch helper to add or remove the finalizer from the MachineSet. + patchHelper, err := patch.NewHelper(ms, r.Client) + if err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: ms}) + } + defer func() { + if err := patchHelper.Patch(ctx, ms); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: ms})}) + } + }() + // Handle deletion reconciliation loop. if !ms.ObjectMeta.DeletionTimestamp.IsZero() { return r.reconcileDelete(ctx, ms) } - // Nothing to do. + // If the MachineSet is not being deleted ensure the finalizer is set. + controllerutil.AddFinalizer(ms, clusterv1.MachineSetTopologyFinalizer) + return ctrl.Result{}, nil } @@ -172,14 +186,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, ms *clusterv1.MachineS } // Remove the finalizer so the MachineSet can be garbage collected by Kubernetes. - patchHelper, err := patch.NewHelper(ms, r.Client) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: ms}) - } controllerutil.RemoveFinalizer(ms, clusterv1.MachineSetTopologyFinalizer) - if err := patchHelper.Patch(ctx, ms); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to patch %s", tlog.KObj{Obj: ms}) - } return ctrl.Result{}, nil } diff --git a/internal/controllers/topology/machineset/machineset_controller_test.go b/internal/controllers/topology/machineset/machineset_controller_test.go index 49b9369efa33..166241931121 100644 --- a/internal/controllers/topology/machineset/machineset_controller_test.go +++ b/internal/controllers/topology/machineset/machineset_controller_test.go @@ -27,11 +27,99 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/util" ) +func TestMachineSetTopologyFinalizer(t *testing.T) { + mdName := "md" + + msBT := builder.BootstrapTemplate(metav1.NamespaceDefault, "msBT").Build() + msIMT := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "msIMT").Build() + + cluster := builder.Cluster(metav1.NamespaceDefault, "fake-cluster").Build() + msBuilder := builder.MachineSet(metav1.NamespaceDefault, "ms"). + WithBootstrapTemplate(msBT). + WithInfrastructureTemplate(msIMT). + WithClusterName(cluster.Name). + WithOwnerReferences([]metav1.OwnerReference{ + { + Kind: "MachineDeployment", + APIVersion: clusterv1.GroupVersion.String(), + Name: "md", + }, + }). 
+ WithLabels(map[string]string{ + clusterv1.MachineDeploymentLabelName: mdName, + clusterv1.ClusterTopologyOwnedLabel: "", + }) + + ms := msBuilder.Build() + msWithFinalizer := msBuilder.Build() + msWithFinalizer.Finalizers = []string{clusterv1.MachineSetTopologyFinalizer} + msWithDeletionTimestamp := msBuilder.Build() + deletionTimestamp := metav1.Now() + msWithDeletionTimestamp.DeletionTimestamp = &deletionTimestamp + + msWithDeletionTimestampAndFinalizer := msWithDeletionTimestamp.DeepCopy() + msWithDeletionTimestampAndFinalizer.Finalizers = []string{clusterv1.MachineSetTopologyFinalizer} + + testCases := []struct { + name string + ms *clusterv1.MachineSet + expectFinalizer bool + }{ + { + name: "should add ClusterTopology finalizer to a MachineSet with no finalizer", + ms: ms, + expectFinalizer: true, + }, + { + name: "should retain ClusterTopology finalizer on MachineSet with finalizer", + ms: msWithFinalizer, + expectFinalizer: true, + }, + { + name: "should not add ClusterTopology finalizer on MachineSet with Deletion Timestamp and no finalizer ", + ms: msWithDeletionTimestamp, + expectFinalizer: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + fakeClient := fake.NewClientBuilder(). + WithScheme(fakeScheme). + WithObjects(tc.ms, msBT, msIMT, cluster). + Build() + + msr := &Reconciler{ + Client: fakeClient, + APIReader: fakeClient, + } + + _, err := msr.Reconcile(ctx, reconcile.Request{ + NamespacedName: util.ObjectKey(tc.ms), + }) + g.Expect(err).NotTo(HaveOccurred()) + + key := client.ObjectKey{Namespace: tc.ms.Namespace, Name: tc.ms.Name} + var actual clusterv1.MachineSet + g.Expect(msr.Client.Get(ctx, key, &actual)).To(Succeed()) + if tc.expectFinalizer { + g.Expect(actual.Finalizers).To(ConsistOf(clusterv1.MachineSetTopologyFinalizer)) + } else { + g.Expect(actual.Finalizers).To(BeEmpty()) + } + }) + } +} + func TestMachineSetReconciler_ReconcileDelete(t *testing.T) { deletionTimeStamp := metav1.Now() diff --git a/internal/test/builder/builders.go b/internal/test/builder/builders.go index 557acfac3710..4c6ac4370d8c 100644 --- a/internal/test/builder/builders.go +++ b/internal/test/builder/builders.go @@ -1249,7 +1249,7 @@ func (m *MachineDeploymentBuilder) Build() *clusterv1.MachineDeployment { return obj } -// MachineSetBuilder holds the variables and objects needed to build a generic MachineSet. +// MachineSetBuilder holds the variables and objects needed to build a MachineSet. type MachineSetBuilder struct { namespace string name string @@ -1257,6 +1257,8 @@ type MachineSetBuilder struct { infrastructureTemplate *unstructured.Unstructured replicas *int32 labels map[string]string + clusterName string + ownerRefs []metav1.OwnerReference } // MachineSet creates a MachineSetBuilder with the given name and namespace. @@ -1273,7 +1275,7 @@ func (m *MachineSetBuilder) WithBootstrapTemplate(ref *unstructured.Unstructured return m } -// WithInfrastructureTemplate adds the passed unstructured object to the MachineSet builder as an infrastructureMachineTemplate. +// WithInfrastructureTemplate adds the passed unstructured object to the MachineSetBuilder as an infrastructureMachineTemplate. func (m *MachineSetBuilder) WithInfrastructureTemplate(ref *unstructured.Unstructured) *MachineSetBuilder { m.infrastructureTemplate = ref return m @@ -1285,12 +1287,24 @@ func (m *MachineSetBuilder) WithLabels(labels map[string]string) *MachineSetBuil return m } -// WithReplicas sets the number of replicas for the MachineSetClassBuilder. 
+// WithReplicas sets the number of replicas for the MachineSetBuilder.
 func (m *MachineSetBuilder) WithReplicas(replicas *int32) *MachineSetBuilder {
 	m.replicas = replicas
 	return m
 }

+// WithClusterName sets the cluster name for the MachineSetBuilder.
+func (m *MachineSetBuilder) WithClusterName(name string) *MachineSetBuilder {
+	m.clusterName = name
+	return m
+}
+
+// WithOwnerReferences adds ownerReferences for the MachineSetBuilder.
+func (m *MachineSetBuilder) WithOwnerReferences(ownerRefs []metav1.OwnerReference) *MachineSetBuilder {
+	m.ownerRefs = ownerRefs
+	return m
+}
+
 // Build creates a new MachineSet with the variables and objects passed to the MachineSetBuilder.
 func (m *MachineSetBuilder) Build() *clusterv1.MachineSet {
 	obj := &clusterv1.MachineSet{
@@ -1299,11 +1313,13 @@ func (m *MachineSetBuilder) Build() *clusterv1.MachineSet {
 			APIVersion: clusterv1.GroupVersion.String(),
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      m.name,
-			Namespace: m.namespace,
-			Labels:    m.labels,
+			Name:            m.name,
+			Namespace:       m.namespace,
+			Labels:          m.labels,
+			OwnerReferences: m.ownerRefs,
 		},
 	}
+	obj.Spec.ClusterName = m.clusterName
 	obj.Spec.Replicas = m.replicas
 	if m.bootstrapTemplate != nil {
 		obj.Spec.Template.Spec.Bootstrap.ConfigRef = objToRef(m.bootstrapTemplate)
diff --git a/internal/test/builder/zz_generated.deepcopy.go b/internal/test/builder/zz_generated.deepcopy.go
index 991349f5a20d..4c0d23c3c8fe 100644
--- a/internal/test/builder/zz_generated.deepcopy.go
+++ b/internal/test/builder/zz_generated.deepcopy.go
@@ -580,6 +580,13 @@ func (in *MachineSetBuilder) DeepCopyInto(out *MachineSetBuilder) {
 			(*out)[key] = val
 		}
 	}
+	if in.ownerRefs != nil {
+		in, out := &in.ownerRefs, &out.ownerRefs
+		*out = make([]v1.OwnerReference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetBuilder.

From 5907dcb7f7f9b9c3a23a27c853ae8f56a48438cd Mon Sep 17 00:00:00 2001
From: Nahshon Unna-Tsameret
Date: Sun, 23 Oct 2022 18:26:26 +0300
Subject: [PATCH 04/87] Add the quickstart details for KubeVirt

Signed-off-by: Nahshon Unna-Tsameret
---
 docs/book/src/user/quick-start.md | 267 ++++++++++++++++++++++++++++--
 1 file changed, 253 insertions(+), 14 deletions(-)

diff --git a/docs/book/src/user/quick-start.md b/docs/book/src/user/quick-start.md
index 603ec4e31402..be43d80b7a51 100644
--- a/docs/book/src/user/quick-start.md
+++ b/docs/book/src/user/quick-start.md
@@ -61,7 +61,7 @@ a target [management cluster] on the selected [infrastructure provider].
 The installation procedure depends on the version of kind; if you are planning to use the Docker infrastructure provider,
 please follow the additional instructions in the dedicated tab:

-   {{#tabs name:"install-kind" tabs:"Default,Docker"}}
+   {{#tabs name:"install-kind" tabs:"Default,Docker,KubeVirt"}}
    {{#tab Default}}

    Create the kind cluster:
@@ -93,6 +93,57 @@ a target [management cluster] on the selected [infrastructure provider].
    Then follow the instruction for your kind version using `kind create cluster --config kind-cluster-with-extramounts.yaml`
    to create the management cluster using the above file.

+   {{#/tab }}
+   {{#tab KubeVirt}}
+
+   #### Create the Kind Cluster
+   [KubeVirt][KubeVirt] is a cloud native virtualization solution. The virtual machines we're going to create and use for
+   the workload cluster's nodes are actually running within pods in the management cluster.
In order to communicate with
+   the workload cluster's API server, we'll need to expose it. We are using kind, which is a limited environment. The
+   easiest way to expose the workload cluster's API server (a pod within a node running in a VM, which is itself running
+   within a pod in the management cluster, which in turn runs inside a docker container) is to use a LoadBalancer service.
+
+   To allow using a LoadBalancer service, we can't use kind's default CNI (kindnet); instead we'll need to install
+   another CNI, such as Calico. In order to do that, we first need to create the kind cluster with two modifications:
+   1. Disable the default CNI
+   2. Add the docker credentials to the cluster, to avoid the Docker Hub pull rate limit when pulling the calico images; read more
+   about it in the [docker documentation](https://docs.docker.com/docker-hub/download-rate-limit/), and in the
+   [kind documentation](https://kind.sigs.k8s.io/docs/user/private-registries/#mount-a-config-file-to-each-node).
+
+   Create a configuration file for kind. Note the docker config file path and adjust it to your local setup:
+   ```bash
+   cat <<EOF > kind-config.yaml
+   kind: Cluster
+   apiVersion: kind.x-k8s.io/v1alpha4
+   networking:
+     # the default CNI will not be installed
+     disableDefaultCNI: true
+   nodes:
+   - role: control-plane
+     extraMounts:
+      - containerPath: /var/lib/kubelet/config.json
+        hostPath: <YOUR DOCKER CONFIG FILE PATH>
+   EOF
+   ```
+   Now, create the kind cluster with the configuration file:
+   ```bash
+   kind create cluster --config=kind-config.yaml
+   ```
+   Test to ensure the local kind cluster is ready:
+   ```bash
+   kubectl cluster-info
+   ```
+
+   #### Install the Calico CNI
+   Now we'll need to install a CNI. In this example, we're using calico, but other CNIs should work as well. Please see
+   the [calico installation guide](https://projectcalico.docs.tigera.io/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico)
+   for more details (use the "Manifest" tab). Below is an example of how to install calico version v3.24.4.
+
+   Use the Calico manifest to create the required resources; e.g.:
+   ```bash
+   kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.24.4/manifests/calico.yaml
+   ```
+
 {{#/tab }}
 {{#/tabs }}
@@ -202,7 +253,7 @@ Additional documentation about experimental features can be found in [Experiment
 Depending on the infrastructure provider you are planning to use, some additional prerequisites should be satisfied
 before getting started with Cluster API. See below for the expected settings for common providers.

-{{#tabs name:"tab-installation-infrastructure" tabs:"AWS,Azure,CloudStack,DigitalOcean,Docker,Equinix Metal,GCP,Hetzner,IBM Cloud,KubeKey,Kubevirt,Metal3,Nutanix,OCI,OpenStack,Outscale,VCD,vcluster,Virtink,vSphere"}}
+{{#tabs name:"tab-installation-infrastructure" tabs:"AWS,Azure,CloudStack,DigitalOcean,Docker,Equinix Metal,GCP,Hetzner,IBM Cloud,KubeKey,KubeVirt,Metal3,Nutanix,OCI,OpenStack,Outscale,VCD,vcluster,Virtink,vSphere"}}
 {{#tab AWS}}

 Download the latest binary of `clusterawsadm` from the [AWS provider releases].
@@ -442,9 +493,61 @@ clusterctl init --infrastructure ibmcloud
 clusterctl init --infrastructure kubekey
 ```

 {{#/tab }}
-{{#tab Kubevirt}}
+{{#tab KubeVirt}}

-Please visit the [Kubevirt project][Kubevirt provider].
+Please visit the [KubeVirt project][KubeVirt provider] for more information.
+
+As described above, we want to use a LoadBalancer service in order to expose the workload cluster's API server.
+In the
+example below, we will use the [MetalLB](https://metallb.universe.tf/) solution to implement load balancing for our kind
+cluster. Other solutions should work as well.
+
+#### Install MetalLB for load balancing
+Install MetalLB, as described [here](https://metallb.universe.tf/installation/#installation-by-manifest); for example:
+```bash
+METALLB_VER=$(curl "https://api.github.com/repos/metallb/metallb/releases/latest" | jq -r ".tag_name")
+kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/${METALLB_VER}/config/manifests/metallb-native.yaml"
+kubectl wait pods -n metallb-system -l app=metallb,component=controller --for=condition=Ready --timeout=10m
+kubectl wait pods -n metallb-system -l app=metallb,component=speaker --for=condition=Ready --timeout=2m
+```
+
+Now, we'll create the `IPAddressPool` and the `L2Advertisement` custom resources. The script below creates the CRs with
+addresses that match the kind cluster's address range:
+```bash
+GW_IP=$(docker network inspect -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}' kind)
+NET_IP=$(echo ${GW_IP} | sed -E 's|^([0-9]+\.[0-9]+)\..*$|\1|g')
+cat <<EOF | sed -E "s|172.19|${NET_IP}|g" | kubectl apply -f -
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  name: capi-ip-pool
+  namespace: metallb-system
+spec:
+  addresses:
+  - 172.19.255.200-172.19.255.250
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+  name: empty
+  namespace: metallb-system
+EOF
+```
 Please visit the [KubeKey provider] for more information.

 {{#/tab }}
 {{#tab Kubevirt}}

-A ClusterAPI compatible image must be available in your Kubevirt image library. For instructions on how to build a compatible image
-see [image-builder](https://image-builder.sigs.k8s.io/capi/capi.html).
-
-To see all required Kubevirt environment variables execute:
 ```bash
-clusterctl generate cluster --infrastructure kubevirt --list-variables capi-quickstart
+export CAPK_GUEST_K8S_VERSION="v1.23.10"
+export CRI_PATH="/var/run/containerd/containerd.sock"
+export NODE_VM_IMAGE_TEMPLATE="quay.io/capk/ubuntu-2004-container-disk:${CAPK_GUEST_K8S_VERSION}"
 ```
+Please visit the [KubeVirt project][KubeVirt provider] for more information.

 {{#/tab }}
 {{#tab Metal3}}
@@ -1007,7 +1109,7 @@ For more information about prerequisites, credentials management, or permissions

 For the purpose of this tutorial, we'll name our cluster capi-quickstart.

-{{#tabs name:"tab-clusterctl-config-cluster" tabs:"Docker, vcluster, others..."}}
+{{#tabs name:"tab-clusterctl-config-cluster" tabs:"Docker, vcluster, KubeVirt, others..."}}
 {{#tab Docker}}